data
dict |
---|
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LSKDIqNAti",
"doi": "10.1109/TVCG.2023.3260628",
"abstract": "Scientific simulations and observations using particles have been creating large datasets that require effective and efficient data reduction to store, transfer, and analyze. However, current approaches either compress only small data well while being inefficient for large data, or handle large data but with insufficient compression. Toward effective and scalable compression/decompression of particle positions, we introduce new kinds of particle hierarchies and corresponding traversal orders that quickly reduce reconstruction error while being fast and low in memory footprint. Our solution to compression of large-scale particle data is a flexible block-based hierarchy that supports progressive, random-access, and error-driven decoding, where error estimation heuristics can be supplied by the user. For low-level node encoding, we introduce new schemes that effectively compress both uniform and densely structured particle distributions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Scientific simulations and observations using particles have been creating large datasets that require effective and efficient data reduction to store, transfer, and analyze. However, current approaches either compress only small data well while being inefficient for large data, or handle large data but with insufficient compression. Toward effective and scalable compression/decompression of particle positions, we introduce new kinds of particle hierarchies and corresponding traversal orders that quickly reduce reconstruction error while being fast and low in memory footprint. Our solution to compression of large-scale particle data is a flexible block-based hierarchy that supports progressive, random-access, and error-driven decoding, where error estimation heuristics can be supplied by the user. For low-level node encoding, we introduce new schemes that effectively compress both uniform and densely structured particle distributions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Scientific simulations and observations using particles have been creating large datasets that require effective and efficient data reduction to store, transfer, and analyze. However, current approaches either compress only small data well while being inefficient for large data, or handle large data but with insufficient compression. Toward effective and scalable compression/decompression of particle positions, we introduce new kinds of particle hierarchies and corresponding traversal orders that quickly reduce reconstruction error while being fast and low in memory footprint. Our solution to compression of large-scale particle data is a flexible block-based hierarchy that supports progressive, random-access, and error-driven decoding, where error estimation heuristics can be supplied by the user. For low-level node encoding, we introduce new schemes that effectively compress both uniform and densely structured particle distributions.",
"title": "Progressive Tree-Based Compression of Large-Scale Particle Data",
"normalizedTitle": "Progressive Tree-Based Compression of Large-Scale Particle Data",
"fno": "10085988",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Encoding",
"Decoding",
"Image Reconstruction",
"Data Models",
"Computational Modeling",
"Compressors",
"Task Analysis",
"Particle Datasets",
"Compression Coding",
"Data Compaction And Compression",
"Hierarchical",
"Progressive Decompression",
"Coarse Approximation",
"Tree Traversal",
"Multiresolution",
"Visualization"
],
"authors": [
{
"givenName": "Duong",
"surname": "Hoang",
"fullName": "Duong Hoang",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Harsh",
"surname": "Bhatia",
"fullName": "Harsh Bhatia",
"affiliation": "Center for Applied Scientific Computing, Lawrence Livermore National Laboratory, Livermore, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Lindstrom",
"fullName": "Peter Lindstrom",
"affiliation": "Center for Applied Scientific Computing, Lawrence Livermore National Laboratory, Livermore, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Valerio",
"surname": "Pascucci",
"fullName": "Valerio Pascucci",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/dcc/2011/279/0/05749493",
"title": "Tree Structure Compression with RePair",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2011/05749493/12OmNxiKsd1",
"parentPublication": {
"id": "proceedings/dcc/2011/279/0",
"title": "2011 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2017/3050/0/08217727",
"title": "A bucket index correction based method for compression of genomic sequencing data",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217727/12OmNzsJ7xC",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2018/01/08031063",
"title": "Optimization of Error-Bounded Lossy Compression for Hard-to-Compress HPC Data",
"doi": null,
"abstractUrl": "/journal/td/2018/01/08031063/13rRUNvgz9t",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671954",
"title": "Improving Lossy Compression for SZ by Exploring the Best-Fit Lossless Compression Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671954/1A8gzQrzAeQ",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020345",
"title": "Towards Guaranteeing Error Bound in DCT-based Lossy Compression",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020345/1KfSYuOtYaI",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2020/07/08821342",
"title": "Compression Ratio Modeling and Estimation across Error Bounds for Lossy Compression",
"doi": null,
"abstractUrl": "/journal/td/2020/07/08821342/1eTOP5vYDhm",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2019/5045/0/504500a508",
"title": "Image Compression Algorithms Based on Dual Tree-Complex Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2019/504500a508/1hHLqx7CE9i",
"parentPublication": {
"id": "proceedings/wcmeim/2019/5045/0",
"title": "2019 2nd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2020/07/08989806",
"title": "Performance Optimization for Relative-Error-Bounded Lossy Compression on Scientific Data",
"doi": null,
"abstractUrl": "/journal/td/2020/07/08989806/1hlpwaAlRGE",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2023/01/09380370",
"title": "High-Ratio Lossy Compression: Exploring the Autoencoder to Compress Scientific Data",
"doi": null,
"abstractUrl": "/journal/bd/2023/01/09380370/1s2FYtnQsZq",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2021/3283/0/328300a032",
"title": "High-Quality and Low-Memory-Footprint Progressive Decoding of Large-Scale Particle Data",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2021/328300a032/1zdPAE86Zhu",
"parentPublication": {
"id": "proceedings/ldav/2021/3283/0",
"title": "2021 IEEE 11th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10083324",
"articleId": "1LSKC3EiUkU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10091196",
"articleId": "1M2IJv0x8as",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1M9lKG9lkNG",
"name": "ttg555501-010085988s1-supp1-3260628.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010085988s1-supp1-3260628.pdf",
"extension": "pdf",
"size": "259 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LRbRtJhrG0",
"doi": "10.1109/TVCG.2023.3261935",
"abstract": "Vision transformer (ViT) expands the success of transformer models from sequential data to images. The model decomposes an image into many smaller patches and arranges them into a sequence. Multi-head self-attentions are then applied to the sequence to learn the attention between patches. Despite many successful interpretations of transformers on sequential data, little effort has been devoted to the interpretation of ViTs, and many questions remain unanswered. For example, among the numerous attention heads, which one is more important? How strong are individual patches attending to their spatial neighbors in different heads? What attention patterns have individual heads learned? In this work, we answer these questions through a visual analytics approach. Specifically, we first identify <bold>what</bold> heads are more important in ViTs by introducing multiple pruning-based metrics. Then, we profile the spatial distribution of attention strengths between patches inside individual heads, as well as the trend of attention strengths across attention layers. Third, using an autoencoder-based learning solution, we summarize all possible attention patterns that individual heads could learn. Examining the attention strengths and patterns of the important heads, we answer <bold>why</bold> they are important. Through concrete case studies with experienced deep learning experts on multiple ViTs, we validate the effectiveness of our solution that deepens the understanding of ViTs from <italic>head importance, head attention strength</italic>, and <italic>head attention pattern</italic>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Vision transformer (ViT) expands the success of transformer models from sequential data to images. The model decomposes an image into many smaller patches and arranges them into a sequence. Multi-head self-attentions are then applied to the sequence to learn the attention between patches. Despite many successful interpretations of transformers on sequential data, little effort has been devoted to the interpretation of ViTs, and many questions remain unanswered. For example, among the numerous attention heads, which one is more important? How strong are individual patches attending to their spatial neighbors in different heads? What attention patterns have individual heads learned? In this work, we answer these questions through a visual analytics approach. Specifically, we first identify <bold>what</bold> heads are more important in ViTs by introducing multiple pruning-based metrics. Then, we profile the spatial distribution of attention strengths between patches inside individual heads, as well as the trend of attention strengths across attention layers. Third, using an autoencoder-based learning solution, we summarize all possible attention patterns that individual heads could learn. Examining the attention strengths and patterns of the important heads, we answer <bold>why</bold> they are important. Through concrete case studies with experienced deep learning experts on multiple ViTs, we validate the effectiveness of our solution that deepens the understanding of ViTs from <italic>head importance, head attention strength</italic>, and <italic>head attention pattern</italic>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Vision transformer (ViT) expands the success of transformer models from sequential data to images. The model decomposes an image into many smaller patches and arranges them into a sequence. Multi-head self-attentions are then applied to the sequence to learn the attention between patches. Despite many successful interpretations of transformers on sequential data, little effort has been devoted to the interpretation of ViTs, and many questions remain unanswered. For example, among the numerous attention heads, which one is more important? How strong are individual patches attending to their spatial neighbors in different heads? What attention patterns have individual heads learned? In this work, we answer these questions through a visual analytics approach. Specifically, we first identify what heads are more important in ViTs by introducing multiple pruning-based metrics. Then, we profile the spatial distribution of attention strengths between patches inside individual heads, as well as the trend of attention strengths across attention layers. Third, using an autoencoder-based learning solution, we summarize all possible attention patterns that individual heads could learn. Examining the attention strengths and patterns of the important heads, we answer why they are important. Through concrete case studies with experienced deep learning experts on multiple ViTs, we validate the effectiveness of our solution that deepens the understanding of ViTs from head importance, head attention strength, and head attention pattern.",
"title": "How Does Attention Work in Vision Transformers? A Visual Analytics Attempt",
"normalizedTitle": "How Does Attention Work in Vision Transformers? A Visual Analytics Attempt",
"fno": "10081322",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Head",
"Transformers",
"Visual Analytics",
"Task Analysis",
"Measurement",
"Heating Systems",
"Deep Learning",
"Vision Transformer",
"Multi Head Self Attention",
"Deep Learning",
"Explainable Artificial Intelligence",
"Visual Analytics"
],
"authors": [
{
"givenName": "Yiran",
"surname": "Li",
"fullName": "Yiran Li",
"affiliation": "University of California, Davis, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Junpeng",
"surname": "Wang",
"fullName": "Junpeng Wang",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xin",
"surname": "Dai",
"fullName": "Xin Dai",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Liang",
"surname": "Wang",
"fullName": "Liang Wang",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chin-Chia Michael",
"surname": "Yeh",
"fullName": "Chin-Chia Michael Yeh",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yan",
"surname": "Zheng",
"fullName": "Yan Zheng",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Zhang",
"fullName": "Wei Zhang",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "University of California, Davis, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tb/2023/02/09772392",
"title": "MCWS-Transformers: Towards an Efficient Modeling of Protein Sequences via Multi Context-Window Based Scaled Self-Attention",
"doi": null,
"abstractUrl": "/journal/tb/2023/02/09772392/1DgjtIA9NkI",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900d819",
"title": "Towards Exemplar-Free Continual Learning in Vision Transformers: an Account of Attention, Functional and Weight Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900d819/1G56zb8vJGU",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09903081",
"title": "Token Selection is a Simple Booster for Vision Transformers",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09903081/1GZog5DAM6s",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09878209",
"title": "StARformer: Transformer with State-Action-Reward Representations for Robot Learning",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09878209/1GrP68bmAuI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2299",
"title": "AdaViT: Adaptive Vision Transformers for Efficient Image Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2299/1H0LFD5xsCQ",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600a150",
"title": "Meta-attention for ViT-backed Continual Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600a150/1H1hvknszFS",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09935321",
"title": "A Diversified Attention Model for Interpretable Multiple Clusterings",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09935321/1HYqzVaX32g",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600c777",
"title": "Multi-level Contrastive Learning for Self-Supervised Vision Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600c777/1KxUTlCRbZS",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpca/2023/7652/0/10071047",
"title": "HeatViT: Hardware-Efficient Adaptive Token Pruning for Vision Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/hpca/2023/10071047/1LMbGetPrxu",
"parentPublication": {
"id": "proceedings/hpca/2023/7652/0",
"title": "2023 IEEE International Symposium on High-Performance Computer Architecture (HPCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a267",
"title": "Multimodal Machine Translation Enhancement by Fusing Multimodal-attention and Fine-grained Image Features",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a267/1xPsmkH0m40",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10081398",
"articleId": "1LRbRjcZeLK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10081427",
"articleId": "1LRbRR2niSs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1M9lJYF1EeQ",
"name": "ttg555501-010081322s1-supp2-3261935.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010081322s1-supp2-3261935.mp4",
"extension": "mp4",
"size": "143 MB",
"__typename": "WebExtraType"
},
{
"id": "1M9lKiADxrG",
"name": "ttg555501-010081322s1-supp1-3261935.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010081322s1-supp1-3261935.pdf",
"extension": "pdf",
"size": "51 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LRbSaS1ClW",
"doi": "10.1109/TVCG.2023.3262039",
"abstract": "Image collage is a very useful tool for visualizing an image collection. Most of the existing methods and commercial applications for generating image collages are designed on simple shapes, such as rectangular and circular layouts. This greatly limits the use of image collages in some artistic and creative settings. Although there are some methods that can generate irregularly-shaped image collages, they often suffer from severe image overlapping and excessive blank space. This prevents such methods from being effective information communication tools. In this paper, we present a shape slicing algorithm and an optimization scheme that can create image collages of arbitrary shapes in an informative and visually pleasing manner given an input shape and an image collection. To overcome the challenge of irregular shapes, we propose a novel algorithm, called <italic>Shape-Aware Slicing</italic>, which partitions the input shape into cells based on medial axis and binary slicing tree. <italic>Shape-Aware Slicing</italic>, which is designed specifically for irregular shapes, takes human perception and shape structure into account to generate visually pleasing partitions. Then, the layout is optimized by analyzing input images with the goal of maximizing the total salient regions of the images. To evaluate our method, we conduct extensive experiments and compare our results against previous work. The evaluations show that our proposed algorithm can efficiently arrange image collections on irregular shapes and create visually superior results than prior work and existing commercial tools.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Image collage is a very useful tool for visualizing an image collection. Most of the existing methods and commercial applications for generating image collages are designed on simple shapes, such as rectangular and circular layouts. This greatly limits the use of image collages in some artistic and creative settings. Although there are some methods that can generate irregularly-shaped image collages, they often suffer from severe image overlapping and excessive blank space. This prevents such methods from being effective information communication tools. In this paper, we present a shape slicing algorithm and an optimization scheme that can create image collages of arbitrary shapes in an informative and visually pleasing manner given an input shape and an image collection. To overcome the challenge of irregular shapes, we propose a novel algorithm, called <italic>Shape-Aware Slicing</italic>, which partitions the input shape into cells based on medial axis and binary slicing tree. <italic>Shape-Aware Slicing</italic>, which is designed specifically for irregular shapes, takes human perception and shape structure into account to generate visually pleasing partitions. Then, the layout is optimized by analyzing input images with the goal of maximizing the total salient regions of the images. To evaluate our method, we conduct extensive experiments and compare our results against previous work. The evaluations show that our proposed algorithm can efficiently arrange image collections on irregular shapes and create visually superior results than prior work and existing commercial tools.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image collage is a very useful tool for visualizing an image collection. Most of the existing methods and commercial applications for generating image collages are designed on simple shapes, such as rectangular and circular layouts. This greatly limits the use of image collages in some artistic and creative settings. Although there are some methods that can generate irregularly-shaped image collages, they often suffer from severe image overlapping and excessive blank space. This prevents such methods from being effective information communication tools. In this paper, we present a shape slicing algorithm and an optimization scheme that can create image collages of arbitrary shapes in an informative and visually pleasing manner given an input shape and an image collection. To overcome the challenge of irregular shapes, we propose a novel algorithm, called Shape-Aware Slicing, which partitions the input shape into cells based on medial axis and binary slicing tree. Shape-Aware Slicing, which is designed specifically for irregular shapes, takes human perception and shape structure into account to generate visually pleasing partitions. Then, the layout is optimized by analyzing input images with the goal of maximizing the total salient regions of the images. To evaluate our method, we conduct extensive experiments and compare our results against previous work. The evaluations show that our proposed algorithm can efficiently arrange image collections on irregular shapes and create visually superior results than prior work and existing commercial tools.",
"title": "Image Collage on Arbitrary Shape via Shape-Aware Slicing and Optimization",
"normalizedTitle": "Image Collage on Arbitrary Shape via Shape-Aware Slicing and Optimization",
"fno": "10081386",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Shape",
"Layout",
"Visualization",
"Optimization",
"Partitioning Algorithms",
"Task Analysis",
"Social Networking Online",
"Image Collection Visualization",
"Image Collage",
"Irregular Shape Layout"
],
"authors": [
{
"givenName": "Dong-Yi",
"surname": "Wu",
"fullName": "Dong-Yi Wu",
"affiliation": "National Cheng-Kung University, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thi-Ngoc-Hanh",
"surname": "Le",
"fullName": "Thi-Ngoc-Hanh Le",
"affiliation": "National Cheng-Kung University, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sheng-Yi",
"surname": "Yao",
"fullName": "Sheng-Yi Yao",
"affiliation": "National Cheng-Kung University, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yun-Chen",
"surname": "Lin",
"fullName": "Yun-Chen Lin",
"affiliation": "National Cheng-Kung University, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tong-Yee",
"surname": "Lee",
"fullName": "Tong-Yee Lee",
"affiliation": "National Cheng-Kung University, Taiwan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2008/2242/0/04587789",
"title": "SMRFI: Shape matching via registration of vector-valued feature images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2008/04587789/12OmNvRU0s8",
"parentPublication": {
"id": "proceedings/cvpr/2008/2242/0",
"title": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2018/2335/0/233501a788",
"title": "Fast Face and Saliency Aware Collage Creation for Mobile Phones",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a788/12OmNwDj0Zc",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/edac/1991/9999/0/00206425",
"title": "Goal orientated slicing enumeration through shape function clipping",
"doi": null,
"abstractUrl": "/proceedings-article/edac/1991/00206425/12OmNwpXRVW",
"parentPublication": {
"id": "proceedings/edac/1991/9999/0",
"title": "Proceedings of the European Conference on Design Automation.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457683",
"title": "Learning shape metrics based on deformations and transport",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457683/12OmNxdDFSA",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2008/2242/0/04587775",
"title": "Efficient object shape recovery via slicing planes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2008/04587775/12OmNzV70OM",
"parentPublication": {
"id": "proceedings/cvpr/2008/2242/0",
"title": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmbia/1996/7367/0/73670234",
"title": "A Computationally Efficient Shape Analysis via Level Sets",
"doi": null,
"abstractUrl": "/proceedings-article/mmbia/1996/73670234/12OmNzdoMv3",
"parentPublication": {
"id": "proceedings/mmbia/1996/7367/0",
"title": "Mathematical Methods in Biomedical Image Analysis, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000b936",
"title": "Matryoshka Networks: Predicting 3D Geometry via Nested Shape Layers",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b936/17D45WYQJ9j",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0813",
"title": "Topology-Preserving Shape Reconstruction and Registration via Neural Diffeomorphic Flow",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0813/1H1hR5pweWc",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2022/7729/0/10092211",
"title": "Detecting Software Code Vulnerabilities Using 2D Convolutional Neural Networks with Program Slicing Feature Maps",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2022/10092211/1MepKpgbMMo",
"parentPublication": {
"id": "proceedings/aipr/2022/7729/0",
"title": "2022 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09540348",
"title": "Balance-Aware Grid Collage for Small Image Collections",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09540348/1wWCehU44hi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10081427",
"articleId": "1LRbRR2niSs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10083324",
"articleId": "1LSKC3EiUkU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LSKE3gksiQ",
"name": "ttg555501-010081386s1-supp1-3262039.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010081386s1-supp1-3262039.pdf",
"extension": "pdf",
"size": "12.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LRbRjcZeLK",
"doi": "10.1109/TVCG.2023.3261320",
"abstract": "In recent years, narrative visualization has gained much attention. Researchers have proposed different design spaces for various narrative visualization genres and scenarios to facilitate the creation process. As users' needs grow and automation technologies advance, increasingly more tools have been designed and developed. In this study, we summarized six genres of narrative visualization (annotated charts, infographics, timelines & storylines, data comics, scrollytelling & slideshow, and data videos) based on previous research and four types of tools (design spaces, authoring tools, ML/AI-supported tools and ML/AI-generator tools) based on the intelligence and automation level of the tools. We surveyed 105 papers and tools to study how automation can progressively engage in visualization design and narrative processes to help users easily create narrative visualizations. This research aims to provide an overview of current research and development in the automation involvement of narrative visualization tools. We discuss key research problems in each category and suggest new opportunities to encourage further research in the related domain.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent years, narrative visualization has gained much attention. Researchers have proposed different design spaces for various narrative visualization genres and scenarios to facilitate the creation process. As users' needs grow and automation technologies advance, increasingly more tools have been designed and developed. In this study, we summarized six genres of narrative visualization (annotated charts, infographics, timelines & storylines, data comics, scrollytelling & slideshow, and data videos) based on previous research and four types of tools (design spaces, authoring tools, ML/AI-supported tools and ML/AI-generator tools) based on the intelligence and automation level of the tools. We surveyed 105 papers and tools to study how automation can progressively engage in visualization design and narrative processes to help users easily create narrative visualizations. This research aims to provide an overview of current research and development in the automation involvement of narrative visualization tools. We discuss key research problems in each category and suggest new opportunities to encourage further research in the related domain.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent years, narrative visualization has gained much attention. Researchers have proposed different design spaces for various narrative visualization genres and scenarios to facilitate the creation process. As users' needs grow and automation technologies advance, increasingly more tools have been designed and developed. In this study, we summarized six genres of narrative visualization (annotated charts, infographics, timelines & storylines, data comics, scrollytelling & slideshow, and data videos) based on previous research and four types of tools (design spaces, authoring tools, ML/AI-supported tools and ML/AI-generator tools) based on the intelligence and automation level of the tools. We surveyed 105 papers and tools to study how automation can progressively engage in visualization design and narrative processes to help users easily create narrative visualizations. This research aims to provide an overview of current research and development in the automation involvement of narrative visualization tools. We discuss key research problems in each category and suggest new opportunities to encourage further research in the related domain.",
"title": "How Does Automation Shape the Process of Narrative Visualization: A Survey of Tools",
"normalizedTitle": "How Does Automation Shape the Process of Narrative Visualization: A Survey of Tools",
"fno": "10081398",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Automation",
"Authoring Systems",
"Videos",
"Flowcharts",
"Taxonomy",
"Data Visualization",
"Automatic Visualization",
"Narrative Visualization",
"Design Space",
"Authoring Tools",
"Survey"
],
"authors": [
{
"givenName": "Qing",
"surname": "Chen",
"fullName": "Qing Chen",
"affiliation": "Intelligent Big Data Visualization Lab, Tongji University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shixiong",
"surname": "Cao",
"fullName": "Shixiong Cao",
"affiliation": "Intelligent Big Data Visualization Lab, Tongji University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiazhe",
"surname": "Wang",
"fullName": "Jiazhe Wang",
"affiliation": "Ant Group, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nan",
"surname": "Cao",
"fullName": "Nan Cao",
"affiliation": "Intelligent Big Data Visualization Lab, Tongji University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-20",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2016/8942/0/8942a203",
"title": "Promoting Insight: A Case Study of How to Incorporate Interaction in Existing Data Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a203/12OmNx7G68T",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122406",
"title": "A Deeper Understanding of Sequence in Narrative Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122406/13rRUwIF6l7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061139",
"title": "Narrative Visualization: Telling Stories with Data",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061139/13rRUxAAST1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122231",
"title": "Visualization Rhetoric: Framing Effects in Narrative Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122231/13rRUxBJhFs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09745848",
"title": "A Model for Types and Levels of Automation in Visual Analytics: a Survey, a Taxonomy, and Examples",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09745848/1CbVnSejsjK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a237",
"title": "Designing Narrative Slideshows for Learning Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a237/1cMF6FsJ8zK",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a044",
"title": "Once Upon a Time in a Land Far Away: Guidelines for Spatio-Temporal Narrative Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a044/1cMF8rgW5na",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222251",
"title": "Designing Narrative-Focused Role-Playing Games for Visualization Literacy in Young Children",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222251/1nTr15tWhvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a151",
"title": "Narrative Transitions in Data Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a151/1qRNOUZefuw",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a181",
"title": "Narrative Sensemaking: Strategies for Narrative Maps Construction",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a181/1yXuj3PJXRm",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10081386",
"articleId": "1LRbSaS1ClW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10081427",
"articleId": "1LRbRR2niSs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LRbRR2niSs",
"doi": "10.1109/TVCG.2023.3261872",
"abstract": "We present a novel framework to efficiently acquire anisotropic reflectance in a pixel-independent fashion, using a deep gated mixture-of-experts. While existing work employs a unified network to handle all possible input, our network automatically learns to condition on the input for enhanced reconstruction. We train a gating module that takes photometric measurements as input and selects one out of a number of specialized decoders for reflectance reconstruction, essentially trading generality for quality. A common pre-trained latent-transform module is also appended to each decoder, to offset the burden of the increased number of decoders. In addition, the illumination conditions during acquisition can be jointly optimized. The effectiveness of our framework is validated on a wide variety of challenging near-planar samples with a lightstage. Compared with the state-of-the-art technique, our quality is improved with the same number of input images, and our input image number can be reduced to about 1/3 for equal-quality results. We further generalize the framework to enhance a state-of-the-art technique on non-planar reflectance scanning.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel framework to efficiently acquire anisotropic reflectance in a pixel-independent fashion, using a deep gated mixture-of-experts. While existing work employs a unified network to handle all possible input, our network automatically learns to condition on the input for enhanced reconstruction. We train a gating module that takes photometric measurements as input and selects one out of a number of specialized decoders for reflectance reconstruction, essentially trading generality for quality. A common pre-trained latent-transform module is also appended to each decoder, to offset the burden of the increased number of decoders. In addition, the illumination conditions during acquisition can be jointly optimized. The effectiveness of our framework is validated on a wide variety of challenging near-planar samples with a lightstage. Compared with the state-of-the-art technique, our quality is improved with the same number of input images, and our input image number can be reduced to about 1/3 for equal-quality results. We further generalize the framework to enhance a state-of-the-art technique on non-planar reflectance scanning.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel framework to efficiently acquire anisotropic reflectance in a pixel-independent fashion, using a deep gated mixture-of-experts. While existing work employs a unified network to handle all possible input, our network automatically learns to condition on the input for enhanced reconstruction. We train a gating module that takes photometric measurements as input and selects one out of a number of specialized decoders for reflectance reconstruction, essentially trading generality for quality. A common pre-trained latent-transform module is also appended to each decoder, to offset the burden of the increased number of decoders. In addition, the illumination conditions during acquisition can be jointly optimized. The effectiveness of our framework is validated on a wide variety of challenging near-planar samples with a lightstage. Compared with the state-of-the-art technique, our quality is improved with the same number of input images, and our input image number can be reduced to about 1/3 for equal-quality results. We further generalize the framework to enhance a state-of-the-art technique on non-planar reflectance scanning.",
"title": "Efficient Reflectance Capture With a Deep Gated Mixture-of-Experts",
"normalizedTitle": "Efficient Reflectance Capture With a Deep Gated Mixture-of-Experts",
"fno": "10081427",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Lighting",
"Image Reconstruction",
"Decoding",
"Optimization",
"Light Emitting Diodes",
"Cameras",
"Neural Networks",
"Anisotropic Reflectance",
"Computational Illumination",
"SVBRDF"
],
"authors": [
{
"givenName": "Xiaohe",
"surname": "Ma",
"fullName": "Xiaohe Ma",
"affiliation": "State Key Lab of CAD & CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yaxin",
"surname": "Yu",
"fullName": "Yaxin Yu",
"affiliation": "State Key Lab of CAD & CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongzhi",
"surname": "Wu",
"fullName": "Hongzhi Wu",
"affiliation": "State Key Lab of CAD & CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kun",
"surname": "Zhou",
"fullName": "Kun Zhou",
"affiliation": "State Key Lab of CAD & CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2013/5053/0/06475040",
"title": "A full-spherical device for simultaneous geometry and reflectance acquisition",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2013/06475040/12OmNBSSV8O",
"parentPublication": {
"id": "proceedings/wacv/2013/5053/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2013/3211/0/3211a441",
"title": "An LED-Based Spectral Imaging System for Surface Reflectance and Normal Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2013/3211a441/12OmNroijaF",
"parentPublication": {
"id": "proceedings/sitis/2013/3211/0",
"title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2015/8667/0/07168375",
"title": "Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2015/07168375/12OmNxWLTsI",
"parentPublication": {
"id": "proceedings/iccp/2015/8667/0",
"title": "2015 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032f372",
"title": "Reflectance Capture Using Univariate Sampling of BRDFs",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032f372/12OmNz6iOHS",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07383338",
"title": "Inter-reflection Compensation of Immersive Projection Display by Spatio-Temporal Screen Reflectance Modulation",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07383338/13rRUwInvfc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/01/ttp2014010086",
"title": "Discriminative Illumination: Per-Pixel Classification of Raw Materials Based on Optimal Projections of Spectral BRDF",
"doi": null,
"abstractUrl": "/journal/tp/2014/01/ttp2014010086/13rRUwdIOW1",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2018/1360/0/136000a147",
"title": "Flexible Computer Simulation for Power Loss Analysis of Direct AC LED Drive Systems",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2018/136000a147/1gjRoolUnvi",
"parentPublication": {
"id": "proceedings/csci/2018/1360/0",
"title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093491",
"title": "Inferring Super-Resolution Depth from a Moving Light-Source Enhanced RGB-D Sensor: A Variational Approach",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093491/1jPbFHPBYnm",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2021/1952/0/09466267",
"title": "Spectral MVIR: Joint Reconstruction of 3D Shape and Spectral Reflectance",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2021/09466267/1uSSWr7wnkY",
"parentPublication": {
"id": "proceedings/iccp/2021/1952/0",
"title": "2021 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09557800",
"title": "Neural Reflectance Capture in the View-Illumination Domain",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09557800/1xquQN6emfS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10081398",
"articleId": "1LRbRjcZeLK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10081433",
"articleId": "1LRbR78bpDy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LSKDegd1i8",
"name": "ttg555501-010081427s1-supp1-3261872.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010081427s1-supp1-3261872.mp4",
"extension": "mp4",
"size": "65.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LRbR78bpDy",
"doi": "10.1109/TVCG.2023.3261934",
"abstract": "Linear diagrams are used to visualize set systems by depicting set memberships as horizontal line segments in a matrix, where each set is represented as a row and each element as a column. Each such line segment of a set is shown in a contiguous horizontal range of cells of the matrix indicating that the corresponding elements in the columns belong to the set. As each set occupies its own row in the matrix, the total height of the resulting visualization is as large as the number of sets in the instance. Such a linear diagram can be visually sparse and intersecting sets containing the same element might be represented by distant rows. To alleviate such undesirable effects, we present LinSets.zip, a new approach that achieves a more space-efficient representation of linear diagrams. First, we minimize the total number of gaps in the horizontal segments by reordering columns, a criterion that has been shown to increase readability in linear diagrams. The main difference of LinSets.zip to linear diagrams is that multiple non-intersecting sets can be positioned in the same row of the matrix. Furthermore, we present several different rendering variations for a matrix-based representation that utilize the proposed row compression. We implemented the different steps of our approach in a visualization pipeline using integer-linear programming, and suitable heuristics aiming at sufficiently fast computations in practice. We conducted both a quantitative evaluation and a small-scale user experiment to compare the effects of compressing linear diagrams.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Linear diagrams are used to visualize set systems by depicting set memberships as horizontal line segments in a matrix, where each set is represented as a row and each element as a column. Each such line segment of a set is shown in a contiguous horizontal range of cells of the matrix indicating that the corresponding elements in the columns belong to the set. As each set occupies its own row in the matrix, the total height of the resulting visualization is as large as the number of sets in the instance. Such a linear diagram can be visually sparse and intersecting sets containing the same element might be represented by distant rows. To alleviate such undesirable effects, we present LinSets.zip, a new approach that achieves a more space-efficient representation of linear diagrams. First, we minimize the total number of gaps in the horizontal segments by reordering columns, a criterion that has been shown to increase readability in linear diagrams. The main difference of LinSets.zip to linear diagrams is that multiple non-intersecting sets can be positioned in the same row of the matrix. Furthermore, we present several different rendering variations for a matrix-based representation that utilize the proposed row compression. We implemented the different steps of our approach in a visualization pipeline using integer-linear programming, and suitable heuristics aiming at sufficiently fast computations in practice. We conducted both a quantitative evaluation and a small-scale user experiment to compare the effects of compressing linear diagrams.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Linear diagrams are used to visualize set systems by depicting set memberships as horizontal line segments in a matrix, where each set is represented as a row and each element as a column. Each such line segment of a set is shown in a contiguous horizontal range of cells of the matrix indicating that the corresponding elements in the columns belong to the set. As each set occupies its own row in the matrix, the total height of the resulting visualization is as large as the number of sets in the instance. Such a linear diagram can be visually sparse and intersecting sets containing the same element might be represented by distant rows. To alleviate such undesirable effects, we present LinSets.zip, a new approach that achieves a more space-efficient representation of linear diagrams. First, we minimize the total number of gaps in the horizontal segments by reordering columns, a criterion that has been shown to increase readability in linear diagrams. The main difference of LinSets.zip to linear diagrams is that multiple non-intersecting sets can be positioned in the same row of the matrix. Furthermore, we present several different rendering variations for a matrix-based representation that utilize the proposed row compression. We implemented the different steps of our approach in a visualization pipeline using integer-linear programming, and suitable heuristics aiming at sufficiently fast computations in practice. We conducted both a quantitative evaluation and a small-scale user experiment to compare the effects of compressing linear diagrams.",
"title": "LinSets.zip: Compressing Linear Set Diagrams",
"normalizedTitle": "LinSets.zip: Compressing Linear Set Diagrams",
"fno": "10081433",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Visualization",
"Data Visualization",
"Image Color Analysis",
"Heuristic Algorithms",
"Taxonomy",
"Sparse Matrices",
"Set Visualization",
"Linear Diagrams",
"User Evaluation",
"Computational Experiment"
],
"authors": [
{
"givenName": "Markus",
"surname": "Wallinger",
"fullName": "Markus Wallinger",
"affiliation": "Algorithms and Complexity Group, TUWien, Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexander",
"surname": "Dobler",
"fullName": "Alexander Dobler",
"affiliation": "Algorithms and Complexity Group, TUWien, Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Martin",
"surname": "Nollenburg",
"fullName": "Martin Nollenburg",
"affiliation": "Algorithms and Complexity Group, TUWien, Vienna, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/dac/1997/2477/0/24770202",
"title": "Linear Sifting of Decision Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/dac/1997/24770202/12OmNA0MZ2D",
"parentPublication": {
"id": "proceedings/dac/1997/2477/0",
"title": "Design Automation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ia3/2016/3867/0/3867a070",
"title": "HISC/R: An Efficient Hypersparse-Matrix Storage Format for Scalable Graph Processing",
"doi": null,
"abstractUrl": "/proceedings-article/ia3/2016/3867a070/12OmNB1eJDD",
"parentPublication": {
"id": "proceedings/ia3/2016/3867/0",
"title": "2016 6th Workshop on Irregular Applications: Architecture and Algorithms (IA3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2013/5024/0/5024a193",
"title": "Parallel Matrix Multiplication Algorithm Based on Vector Linear Combination Using MapReduce",
"doi": null,
"abstractUrl": "/proceedings-article/services/2013/5024a193/12OmNBpEeX2",
"parentPublication": {
"id": "proceedings/services/2013/5024/0",
"title": "2013 IEEE World Congress on Services (SERVICES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fccm/2015/9969/0/9969a064",
"title": "Accelerating SpMV on FPGAs by Compressing Nonzero Values",
"doi": null,
"abstractUrl": "/proceedings-article/fccm/2015/9969a064/12OmNvEyR8X",
"parentPublication": {
"id": "proceedings/fccm/2015/9969/0",
"title": "2015 IEEE 23rd Annual International Symposium on Field-Programmable Custom Computing Machines (FCCM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2012/4789/0/4789a673",
"title": "Storage and Solving of Large Sparse Matrix Linear Equations",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2012/4789a673/12OmNx7ov3K",
"parentPublication": {
"id": "proceedings/iccis/2012/4789/0",
"title": "2012 Fourth International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdpsw/2017/3408/0/07965138",
"title": "A Compression Method for Storage Formats of a Sparse Matrix in Solving the Large-Scale Linear Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ipdpsw/2017/07965138/12OmNyKJiAU",
"parentPublication": {
"id": "proceedings/ipdpsw/2017/3408/0",
"title": "2017 IEEE International Parallel and Distributed Processing Symposium: Workshops (IPDPSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgciot/2015/7910/0/07380712",
"title": "Spherule diagrams: A matrix-based set visualization compared with Euler diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/icgciot/2015/07380712/12OmNyvGyfY",
"parentPublication": {
"id": "proceedings/icgciot/2015/7910/0",
"title": "2015 International Conference on Green Computing and Internet of Things (ICGCIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2015/11/06954570",
"title": "Sparse Matrix Multiplication On An Associative Processor",
"doi": null,
"abstractUrl": "/journal/td/2015/11/06954570/13rRUxlgy3l",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2016/3593/0/07982305",
"title": "On the Stability and Performance of the Solution of Sparse Linear Systems by Partitioned Procedures",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2016/07982305/17D45XlyDuP",
"parentPublication": {
"id": "proceedings/cse-euc/2016/3593/0",
"title": "2016 19th IEEE Intl Conference on Computational Science and Engineering (CSE), IEEE 14th Intl Conference on Embedded and Ubiquitous Computing (EUC), and 15th Intl Symposium on Distributed Computing and Applications for Business Engineering (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020221",
"title": "Scalable Parallel Machine Learning Computing a Summarization Matrix with SQL Queries",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020221/1KfR871H22A",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10081462",
"articleId": "1LRbR0bikbC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10081398",
"articleId": "1LRbRjcZeLK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LRbQOpNdTO",
"doi": "10.1109/TVCG.2023.3261981",
"abstract": "This paper presents a well-scaling parallel algorithm for the computation of Morse-Smale (MS) segmentations, including the region separators and region boundaries. The segmentation of the domain into ascending and descending manifolds, solely defined on the vertices, improves the computational time using path compression and fully segments the border region. Region boundaries and region separators are generated using a multi-label marching tetrahedra algorithm. This enables a fast and simple solution to find optimal parameter settings in preliminary exploration steps by generating an MS complex preview. It also poses a rapid option to generate a fast visual representation of the region geometries for immediate utilization. Two experiments demonstrate the performance of our approach with speedups of over an order of magnitude in comparison to two publicly available implementations. The example section shows the similarity to the MS complex, the useability of the approach, and the benefits of this method with respect to the presented datasets. We provide our implementation with the paper.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a well-scaling parallel algorithm for the computation of Morse-Smale (MS) segmentations, including the region separators and region boundaries. The segmentation of the domain into ascending and descending manifolds, solely defined on the vertices, improves the computational time using path compression and fully segments the border region. Region boundaries and region separators are generated using a multi-label marching tetrahedra algorithm. This enables a fast and simple solution to find optimal parameter settings in preliminary exploration steps by generating an MS complex preview. It also poses a rapid option to generate a fast visual representation of the region geometries for immediate utilization. Two experiments demonstrate the performance of our approach with speedups of over an order of magnitude in comparison to two publicly available implementations. The example section shows the similarity to the MS complex, the useability of the approach, and the benefits of this method with respect to the presented datasets. We provide our implementation with the paper.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a well-scaling parallel algorithm for the computation of Morse-Smale (MS) segmentations, including the region separators and region boundaries. The segmentation of the domain into ascending and descending manifolds, solely defined on the vertices, improves the computational time using path compression and fully segments the border region. Region boundaries and region separators are generated using a multi-label marching tetrahedra algorithm. This enables a fast and simple solution to find optimal parameter settings in preliminary exploration steps by generating an MS complex preview. It also poses a rapid option to generate a fast visual representation of the region geometries for immediate utilization. Two experiments demonstrate the performance of our approach with speedups of over an order of magnitude in comparison to two publicly available implementations. The example section shows the similarity to the MS complex, the useability of the approach, and the benefits of this method with respect to the presented datasets. We provide our implementation with the paper.",
"title": "Parallel Computation of Piecewise Linear Morse-Smale Segmentations",
"normalizedTitle": "Parallel Computation of Piecewise Linear Morse-Smale Segmentations",
"fno": "10081444",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Particle Separators",
"Visualization",
"Topology",
"Image Color Analysis",
"Graphics Processing Units",
"Data Visualization",
"Computational Efficiency",
"Morse Smale Complex",
"Segmentation",
"Topology",
"Visualization",
"Watershed Transformation"
],
"authors": [
{
"givenName": "Robin G. C.",
"surname": "Maack",
"fullName": "Robin G. C. Maack",
"affiliation": "RPTU Kaiserslautern-Landau",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jonas",
"surname": "Lukasczyk",
"fullName": "Jonas Lukasczyk",
"affiliation": "RPTU Kaiserslautern-Landau",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Julien",
"surname": "Tierny",
"fullName": "Julien Tierny",
"affiliation": "CNRS and Sorbonne Université",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hans",
"surname": "Hagen",
"fullName": "Hans Hagen",
"affiliation": "RPTU Kaiserslautern-Landau",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ross",
"surname": "Maciejewski",
"fullName": "Ross Maciejewski",
"affiliation": "Arizona State University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christoph",
"surname": "Garth",
"fullName": "Christoph Garth",
"affiliation": "RPTU Kaiserslautern-Landau",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icde/1993/3570/0/00344058",
"title": "ARIES/LHS: A concurrency control and recovery method using write-ahead logging for linear hashing with separators",
"doi": null,
"abstractUrl": "/proceedings-article/icde/1993/00344058/12OmNC8MsCB",
"parentPublication": {
"id": "proceedings/icde/1993/3570/0",
"title": "Proceedings of IEEE 9th International Conference on Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2012/4675/0/4675a484",
"title": "The Parallel Computation of Morse-Smale Complexes",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2012/4675a484/12OmNrFBQ1B",
"parentPublication": {
"id": "proceedings/ipdps/2012/4675/0",
"title": "Parallel and Distributed Processing Symposium, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2017/4822/0/482201a557",
"title": "CombinedPWD: A New Password Authentication Mechanism Using Separators Between Keystrokes",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2017/482201a557/12OmNyRPgqy",
"parentPublication": {
"id": "proceedings/cis/2017/4822/0",
"title": "2017 13th International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/1993/4960/0/00395630",
"title": "A hybrid page segmentation method",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/1993/00395630/12OmNylKB5Y",
"parentPublication": {
"id": "proceedings/icdar/1993/4960/0",
"title": "Proceedings of 2nd International Conference on Document Analysis and Recognition (ICDAR '93)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875918",
"title": "Conforming Morse-Smale Complexes",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875918/13rRUwjGoLG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/10/ttg2012101757",
"title": "Parallel Computation of 2D Morse-Smale Complexes",
"doi": null,
"abstractUrl": "/journal/tg/2012/10/ttg2012101757/13rRUxASuSL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/06/04658183",
"title": "A Practical Approach to Morse-Smale Complex Computation: Scalability and Generality",
"doi": null,
"abstractUrl": "/journal/tg/2008/06/04658183/13rRUxOdD8b",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/09/06065731",
"title": "Direct Feature Visualization Using Morse-Smale Complexes",
"doi": null,
"abstractUrl": "/journal/tg/2012/09/06065731/13rRUyv53Fn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2019/0869/0/086900a037",
"title": "Natural Software Revisited",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2019/086900a037/1cMFworDOtW",
"parentPublication": {
"id": "proceedings/icse/2019/0869/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering (ICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a036",
"title": "GPU Parallel Computation of Morse-Smale Complexes",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a036/1qRO66SHgwU",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10081433",
"articleId": "1LRbR78bpDy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10081462",
"articleId": "1LRbR0bikbC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LRbR0bikbC",
"doi": "10.1109/TVCG.2023.3261944",
"abstract": "Public opinion surveys constitute a widespread, powerful tool to study peoples' attitudes and behaviors from comparative perspectives. However, even global surveys can have limited geographic and temporal coverage, which can hinder the production of comprehensive knowledge. To expand the scope of comparison, social scientists turn to ex-post harmonization of variables from datasets that cover similar topics but in different populations and/or at different times. These harmonized datasets can be analyzed as a single source and accessed through various data portals. However, the Survey Data Recycling (SDR) research project has identified three challenges faced by social scientists when using data portals: the lack of capability to explore data in-depth or query data based on customized needs, the difficulty in efficiently identifying related data for studies, and the incapability to evaluate theoretical models using sliced data. To address these issues, the SDR research project has developed the <italic>SDR</italic>Querier, which is applied to the harmonized SDR database. The <italic>SDR</italic>Querier includes a BERT-based model that allows for customized data queries through research questions or keywords (Query-by-Question), a visual design that helps users determine the availability of harmonized data for a given research question (Query-by-Condition), and the ability to reveal the underlying relational patterns among substantive and methodological variables in the database (Query-by-Relation), aiding in the rigorous evaluation or improvement of regression models. Case studies with multiple social scientists have demonstrated the usefulness and effectiveness of the <italic>SDR</italic>Querier in addressing daily challenges.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Public opinion surveys constitute a widespread, powerful tool to study peoples' attitudes and behaviors from comparative perspectives. However, even global surveys can have limited geographic and temporal coverage, which can hinder the production of comprehensive knowledge. To expand the scope of comparison, social scientists turn to ex-post harmonization of variables from datasets that cover similar topics but in different populations and/or at different times. These harmonized datasets can be analyzed as a single source and accessed through various data portals. However, the Survey Data Recycling (SDR) research project has identified three challenges faced by social scientists when using data portals: the lack of capability to explore data in-depth or query data based on customized needs, the difficulty in efficiently identifying related data for studies, and the incapability to evaluate theoretical models using sliced data. To address these issues, the SDR research project has developed the <italic>SDR</italic>Querier, which is applied to the harmonized SDR database. The <italic>SDR</italic>Querier includes a BERT-based model that allows for customized data queries through research questions or keywords (Query-by-Question), a visual design that helps users determine the availability of harmonized data for a given research question (Query-by-Condition), and the ability to reveal the underlying relational patterns among substantive and methodological variables in the database (Query-by-Relation), aiding in the rigorous evaluation or improvement of regression models. Case studies with multiple social scientists have demonstrated the usefulness and effectiveness of the <italic>SDR</italic>Querier in addressing daily challenges.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Public opinion surveys constitute a widespread, powerful tool to study peoples' attitudes and behaviors from comparative perspectives. However, even global surveys can have limited geographic and temporal coverage, which can hinder the production of comprehensive knowledge. To expand the scope of comparison, social scientists turn to ex-post harmonization of variables from datasets that cover similar topics but in different populations and/or at different times. These harmonized datasets can be analyzed as a single source and accessed through various data portals. However, the Survey Data Recycling (SDR) research project has identified three challenges faced by social scientists when using data portals: the lack of capability to explore data in-depth or query data based on customized needs, the difficulty in efficiently identifying related data for studies, and the incapability to evaluate theoretical models using sliced data. To address these issues, the SDR research project has developed the SDRQuerier, which is applied to the harmonized SDR database. The SDRQuerier includes a BERT-based model that allows for customized data queries through research questions or keywords (Query-by-Question), a visual design that helps users determine the availability of harmonized data for a given research question (Query-by-Condition), and the ability to reveal the underlying relational patterns among substantive and methodological variables in the database (Query-by-Relation), aiding in the rigorous evaluation or improvement of regression models. Case studies with multiple social scientists have demonstrated the usefulness and effectiveness of the SDRQuerier in addressing daily challenges.",
"title": "<italic>SDR</italic>Querier: A Visual Querying Framework for Cross-National Survey Data Recycling",
"normalizedTitle": "SDRQuerier: A Visual Querying Framework for Cross-National Survey Data Recycling",
"fno": "10081462",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Data Models",
"Biological System Modeling",
"Rivers",
"Portals",
"Bit Error Rate",
"Sociology",
"Survey Data Recycling",
"Data Harmonization",
"Visual Data Query",
"Social Science",
"Visual Analytics"
],
"authors": [
{
"givenName": "Yamei",
"surname": "Tu",
"fullName": "Yamei Tu",
"affiliation": "Ohio State University, Columbus, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Olga",
"surname": "Li",
"fullName": "Olga Li",
"affiliation": "Visa Research, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Junpeng",
"surname": "Wang",
"fullName": "Junpeng Wang",
"affiliation": "Visa Research, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Han-Wei",
"surname": "Shen",
"fullName": "Han-Wei Shen",
"affiliation": "Ohio State University, Columbus, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Przemek",
"surname": "Powałko",
"fullName": "Przemek Powałko",
"affiliation": "Visa Research, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Irina",
"surname": "Tomescu-Dubrow",
"fullName": "Irina Tomescu-Dubrow",
"affiliation": "Ohio State University, Columbus, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kazimierz M.",
"surname": "Slomczynski",
"fullName": "Kazimierz M. Slomczynski",
"affiliation": "Visa Research, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Spyros",
"surname": "Blanas",
"fullName": "Spyros Blanas",
"affiliation": "Ohio State University, Columbus, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J. Craig",
"surname": "Jenkins",
"fullName": "J. Craig Jenkins",
"affiliation": "Ohio State University, Columbus, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cic/2016/4607/0/4607a433",
"title": "Knowledge Graph-Based Query Rewriting in a Relational Data Harmonization Framework",
"doi": null,
"abstractUrl": "/proceedings-article/cic/2016/4607a433/12OmNApcuBR",
"parentPublication": {
"id": "proceedings/cic/2016/4607/0",
"title": "2016 IEEE 2nd International Conference on Collaboration and Internet Computing (CIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2015/9926/0/07363790",
"title": "DSDQuery DSI — Querying scientific data repositories with structured operators",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07363790/12OmNBa2iAJ",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/sc/2023/02/09774922",
"title": "<italic>PuzzleMesh</italic>: A Puzzle Model to Build Mesh of Agnostic Services for Edge-Fog-Cloud",
"doi": null,
"abstractUrl": "/journal/sc/2023/02/09774922/1DlicOYj0S4",
"parentPublication": {
"id": "trans/sc",
"title": "IEEE Transactions on Services Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2022/12/09810502",
"title": "<italic>Eiffel</italic>: Efficient and Fair Scheduling in Adaptive Federated Learning",
"doi": null,
"abstractUrl": "/journal/td/2022/12/09810502/1EBiW6MzSiA",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09847102",
"title": "<italic>MD-Cave</italic>: An Immersive Visualization Workbench for Radiologists",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09847102/1Fu4IEH0oAU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09865200",
"title": "<italic>DeepContext:</italic> Mobile Context Modeling and Prediction Via HMMs and Deep Learning",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09865200/1G4AyNfIfII",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904569",
"title": "<italic>Probablement, Wahrscheinlich, Likely</italic>? A Cross-Language Study of How People Verbalize Probabilities in Icon Array Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904569/1H0GlRvv21y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2022/04/09409653",
"title": "NN-EMD: Efficiently Training <italic>Neural Networks</italic> Using <italic>Encrypted Multi-Sourced Datasets</italic>",
"doi": null,
"abstractUrl": "/journal/tq/2022/04/09409653/1sXjHNCpGrm",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/03/09382912",
"title": "VERTIGo: A Visual Platform for Querying and Exploring Large Multilayer Networks",
"doi": null,
"abstractUrl": "/journal/tg/2022/03/09382912/1saZsRW0LYY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2022/06/09511199",
"title": "<italic>SeUpdate</italic>: Secure Encrypted Data Update for Multi-User Environments",
"doi": null,
"abstractUrl": "/journal/tq/2022/06/09511199/1vXcQkCqiQg",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10081444",
"articleId": "1LRbQOpNdTO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10081433",
"articleId": "1LRbR78bpDy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1M2IJ7vw7vi",
"name": "ttg555501-010081462s1-supp1-3261944.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010081462s1-supp1-3261944.mp4",
"extension": "mp4",
"size": "123 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LRbQCd2D7O",
"doi": "10.1109/TVCG.2023.3261910",
"abstract": "Collusive fraud, in which multiple fraudsters collude to defraud health insurance funds, threatens the operation of the healthcare system. However, existing statistical and machine learning-based methods have limited ability to detect fraud in the scenario of health insurance due to the high similarity of fraudulent behaviors to normal medical visits and the lack of labeled data. To ensure the accuracy of the detection results, expert knowledge needs to be integrated with the fraud detection process. By working closely with health insurance audit experts, we propose FraudAuditor, a three-stage visual analytics approach to collusive fraud detection in health insurance. Specifically, we first allow users to interactively construct a co-visit network to holistically model the visit relationships of different patients. Second, an improved community detection algorithm that considers the strength of fraud likelihood is designed to detect suspicious fraudulent groups. Finally, through our visual interface, users can compare, investigate, and verify suspicious patient behavior with tailored visualizations that support different time scales. We conducted case studies in a real-world healthcare scenario, i.e., to help locate the actual fraud group and exclude the false positive group. The results and expert feedback proved the effectiveness and usability of the approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Collusive fraud, in which multiple fraudsters collude to defraud health insurance funds, threatens the operation of the healthcare system. However, existing statistical and machine learning-based methods have limited ability to detect fraud in the scenario of health insurance due to the high similarity of fraudulent behaviors to normal medical visits and the lack of labeled data. To ensure the accuracy of the detection results, expert knowledge needs to be integrated with the fraud detection process. By working closely with health insurance audit experts, we propose FraudAuditor, a three-stage visual analytics approach to collusive fraud detection in health insurance. Specifically, we first allow users to interactively construct a co-visit network to holistically model the visit relationships of different patients. Second, an improved community detection algorithm that considers the strength of fraud likelihood is designed to detect suspicious fraudulent groups. Finally, through our visual interface, users can compare, investigate, and verify suspicious patient behavior with tailored visualizations that support different time scales. We conducted case studies in a real-world healthcare scenario, i.e., to help locate the actual fraud group and exclude the false positive group. The results and expert feedback proved the effectiveness and usability of the approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Collusive fraud, in which multiple fraudsters collude to defraud health insurance funds, threatens the operation of the healthcare system. However, existing statistical and machine learning-based methods have limited ability to detect fraud in the scenario of health insurance due to the high similarity of fraudulent behaviors to normal medical visits and the lack of labeled data. To ensure the accuracy of the detection results, expert knowledge needs to be integrated with the fraud detection process. By working closely with health insurance audit experts, we propose FraudAuditor, a three-stage visual analytics approach to collusive fraud detection in health insurance. Specifically, we first allow users to interactively construct a co-visit network to holistically model the visit relationships of different patients. Second, an improved community detection algorithm that considers the strength of fraud likelihood is designed to detect suspicious fraudulent groups. Finally, through our visual interface, users can compare, investigate, and verify suspicious patient behavior with tailored visualizations that support different time scales. We conducted case studies in a real-world healthcare scenario, i.e., to help locate the actual fraud group and exclude the false positive group. The results and expert feedback proved the effectiveness and usability of the approach.",
"title": "FraudAuditor: A Visual Analytics Approach for Collusive Fraud in Health Insurance",
"normalizedTitle": "FraudAuditor: A Visual Analytics Approach for Collusive Fraud in Health Insurance",
"fno": "10081495",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Fraud",
"Insurance",
"Drugs",
"Behavioral Sciences",
"Diseases",
"Feature Extraction",
"Visual Analytics",
"Collusive Fraud",
"Fraud Detection",
"Health Insurance"
],
"authors": [
{
"givenName": "Jiehui",
"surname": "Zhou",
"fullName": "Jiehui Zhou",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xumeng",
"surname": "Wang",
"fullName": "Xumeng Wang",
"affiliation": "TMCC, CS, Nankai University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jie",
"surname": "Wang",
"fullName": "Jie Wang",
"affiliation": "Alibaba Group, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hui",
"surname": "Ye",
"fullName": "Hui Ye",
"affiliation": "Tencent, Shenzhen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huanliang",
"surname": "Wang",
"fullName": "Huanliang Wang",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zihan",
"surname": "Zhou",
"fullName": "Zihan Zhou",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dongming",
"surname": "Han",
"fullName": "Dongming Han",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haochao",
"surname": "Ying",
"fullName": "Haochao Ying",
"affiliation": "School of Public Health, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Wu",
"fullName": "Jian Wu",
"affiliation": "Second Affiliated Hospital School of Medicine, School of Public Health, and Institute of Wenzhou, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Chen",
"fullName": "Wei Chen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ic3/2017/3077/0/08284299",
"title": "Fraud detection and frequent pattern matching in insurance claims using data mining techniques",
"doi": null,
"abstractUrl": "/proceedings-article/ic3/2017/08284299/12OmNANBZrf",
"parentPublication": {
"id": "proceedings/ic3/2017/3077/0",
"title": "2017 Tenth International Conference on Contemporary Computing (IC3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2011/4335/0/4335b276",
"title": "Random Rough Subspace Based Neural Network Ensemble for Insurance Fraud Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2011/4335b276/12OmNAYGltt",
"parentPublication": {
"id": "proceedings/cso/2011/4335/0",
"title": "2011 Fourth International Joint Conference on Computational Sciences and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2015/9926/0/07363893",
"title": "Investigating insurance fraud using social media",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07363893/12OmNrAdsyk",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2016/5910/0/07836752",
"title": "Data Exchange Platform to Fight Insurance Fraud on Blockchain",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2016/07836752/12OmNwHhoZN",
"parentPublication": {
"id": "proceedings/icdmw/2016/5910/0",
"title": "2016 IEEE 16th International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccac/2017/1939/0/1939a186",
"title": "Fraud Data Analytics Tools and Techniques in Big Data Era",
"doi": null,
"abstractUrl": "/proceedings-article/iccac/2017/1939a186/12OmNwJybOC",
"parentPublication": {
"id": "proceedings/iccac/2017/1939/0",
"title": "2017 International Conference on Cloud and Autonomic Computing (ICCAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdatasecurity-hpsc-ids/2022/8069/0/806900a174",
"title": "Node Similarity-based Search Method for Medical Insurance Heterogeneous Information Network",
"doi": null,
"abstractUrl": "/proceedings-article/bigdatasecurity-hpsc-ids/2022/806900a174/1EykEmpyAE0",
"parentPublication": {
"id": "proceedings/bigdatasecurity-hpsc-ids/2022/8069/0",
"title": "2022 IEEE 8th Intl Conference on Big Data Security on Cloud (BigDataSecurity), IEEE Intl Conference on High Performance and Smart Computing, (HPSC) and IEEE Intl Conference on Intelligent Data and Security (IDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859871",
"title": "Hierarchical Multi-Modal Fusion on Dynamic Heterogeneous Graph for Health Insurance Fraud Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859871/1G9ELjH8sYU",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2019/0858/0/09006115",
"title": "InfDetect: a Large Scale Graph-based Fraud Detection System for E-Commerce Insurance",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2019/09006115/1hJsdeaOrJu",
"parentPublication": {
"id": "proceedings/big-data/2019/0858/0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/sc/2022/04/09320307",
"title": "Identifying Health Insurance Claim Frauds Using Mixture of Clinical Concepts",
"doi": null,
"abstractUrl": "/journal/sc/2022/04/09320307/1qkwnyqwQkU",
"parentPublication": {
"id": "trans/sc",
"title": "IEEE Transactions on Services Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdatasecurity-hpsc-ids/2021/3927/0/392700a057",
"title": "Research on Bootstrapping Algorithm for Health Insurance Data Fraud Detection Based on Decision Tree",
"doi": null,
"abstractUrl": "/proceedings-article/bigdatasecurity-hpsc-ids/2021/392700a057/1uPzdCEUsbC",
"parentPublication": {
"id": "proceedings/bigdatasecurity-hpsc-ids/2021/3927/0",
"title": "2021 7th IEEE Intl Conference on Big Data Security on Cloud (BigDataSecurity), IEEE Intl Conference on High Performance and Smart Computing, (HPSC) and IEEE Intl Conference on Intelligent Data and Security (IDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10077744",
"articleId": "1LH8EZ3NEGI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10081444",
"articleId": "1LRbQOpNdTO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LFQ6Ir6DEQ",
"doi": "10.1109/TVCG.2023.3259183",
"abstract": "The significance of artistry in creating animated virtual characters is widely acknowledged, and motion style is a crucial element in this process. There has been a long-standing interest in stylizing character animations with style transfer methods. However, this kind of models can only deal with short-term motions and yield deterministic outputs. To address this issue, we propose a generative model based on normalizing flows for stylizing long and aperiodic animations in the VR scene. Our approach breaks down this task into two sub-problems: motion style transfer and stylized motion generation, both formulated as the instances of conditional normalizing flows with multi-class latent space. Specifically, we encode high-frequency style features into the latent space for varied results and control the generation process with style-content labels for disentangled edits of style and content. We have developed a prototype, StyleVR, in Unity, which allows casual users to apply our method in VR. Through qualitative and quantitative comparisons, we demonstrate that our system outperforms other methods in terms of style transfer as well as stochastic stylized motion generation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The significance of artistry in creating animated virtual characters is widely acknowledged, and motion style is a crucial element in this process. There has been a long-standing interest in stylizing character animations with style transfer methods. However, this kind of models can only deal with short-term motions and yield deterministic outputs. To address this issue, we propose a generative model based on normalizing flows for stylizing long and aperiodic animations in the VR scene. Our approach breaks down this task into two sub-problems: motion style transfer and stylized motion generation, both formulated as the instances of conditional normalizing flows with multi-class latent space. Specifically, we encode high-frequency style features into the latent space for varied results and control the generation process with style-content labels for disentangled edits of style and content. We have developed a prototype, StyleVR, in Unity, which allows casual users to apply our method in VR. Through qualitative and quantitative comparisons, we demonstrate that our system outperforms other methods in terms of style transfer as well as stochastic stylized motion generation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The significance of artistry in creating animated virtual characters is widely acknowledged, and motion style is a crucial element in this process. There has been a long-standing interest in stylizing character animations with style transfer methods. However, this kind of models can only deal with short-term motions and yield deterministic outputs. To address this issue, we propose a generative model based on normalizing flows for stylizing long and aperiodic animations in the VR scene. Our approach breaks down this task into two sub-problems: motion style transfer and stylized motion generation, both formulated as the instances of conditional normalizing flows with multi-class latent space. Specifically, we encode high-frequency style features into the latent space for varied results and control the generation process with style-content labels for disentangled edits of style and content. We have developed a prototype, StyleVR, in Unity, which allows casual users to apply our method in VR. Through qualitative and quantitative comparisons, we demonstrate that our system outperforms other methods in terms of style transfer as well as stochastic stylized motion generation.",
"title": "StyleVR: Stylizing Character Animations with Normalizing Flows",
"normalizedTitle": "StyleVR: Stylizing Character Animations with Normalizing Flows",
"fno": "10076832",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Animation",
"Trajectory",
"Three Dimensional Displays",
"Solid Modeling",
"Probabilistic Logic",
"Prototypes",
"Computational Modeling",
"Character Animation",
"Motion Generation",
"Style Transfer",
"Normalizing Flow",
"Virtual Reality"
],
"authors": [
{
"givenName": "Bin",
"surname": "Ji",
"fullName": "Bin Ji",
"affiliation": "Shanghai Jiao Tong University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ye",
"surname": "Pan",
"fullName": "Ye Pan",
"affiliation": "Shanghai Jiao Tong University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yichao",
"surname": "Yan",
"fullName": "Yichao Yan",
"affiliation": "Shanghai Jiao Tong University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ruizhao",
"surname": "Chen",
"fullName": "Ruizhao Chen",
"affiliation": "Shanghai Jiao Tong University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaokang",
"surname": "Yang",
"fullName": "Xiaokang Yang",
"affiliation": "Shanghai Jiao Tong University, Shanghai, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "mags/cg/2012/03/mcg2012030059",
"title": "Sketch-n-Stretch: Sketching Animations Using Cutouts",
"doi": null,
"abstractUrl": "/magazine/cg/2012/03/mcg2012030059/13rRUyeTVks",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2021/2398/0/239800b385",
"title": "Dynamic Attributed Graph Prediction with Conditional Normalizing Flows",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2021/239800b385/1Aqx8vdmQCY",
"parentPublication": {
"id": "proceedings/icdm/2021/2398/0",
"title": "2021 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1234",
"title": "Style-Structure Disentangled Features and Normalizing Flows for Diverse Icon Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1234/1H0Og7iXqlq",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600r7442",
"title": "Modeling sRGB Camera Noise with Normalizing Flows",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600r7442/1H1mr0cQZpu",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09992151",
"title": "Personalized Audio-Driven 3D Facial Animation Via Style-Content Disentanglement",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09992151/1JevBLSiUqA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2022/7172/0/717200a218",
"title": "Emotionally Expressive Motion Controller for Virtual Character Locomotion Animations",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2022/717200a218/1KaHFw8d8VW",
"parentPublication": {
"id": "proceedings/ism/2022/7172/0",
"title": "2022 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300e540",
"title": "PointFlow: 3D Point Cloud Generation With Continuous Normalizing Flows",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300e540/1hQqlMAar9S",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300d165",
"title": "Noise Flow: Noise Modeling With Conditional Normalizing Flows",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300d165/1hVllwEnMrK",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/11/09089305",
"title": "Normalizing Flows: An Introduction and Review of Current Methods",
"doi": null,
"abstractUrl": "/journal/tp/2021/11/09089305/1jDwlyVxAwE",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900n3607",
"title": "Autoregressive Stylized Motion Synthesis with Generative Flow",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900n3607/1yeIFQTwlXO",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10076837",
"articleId": "1LFQ6yTQbIs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10077440",
"articleId": "1LFQ6PMpeik",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LFQ6yTQbIs",
"doi": "10.1109/TVCG.2023.3259044",
"abstract": "Geometric deep learning has sparked a rising interest in computer graphics to perform shape understanding tasks, such as shape classification and semantic segmentation. When the input is a polygonal surface, one has to suffer from the irregular mesh structure. Motivated by the geometric spectral theory, we introduce <italic>Laplacian2Mesh</italic>, a novel and flexible convolutional neural network (CNN) framework for coping with irregular triangle meshes (vertices may have any valence). By mapping the input mesh surface to the multi-dimensional Laplacian-Beltrami space, Laplacian2Mesh enables one to perform shape analysis tasks directly using the mature CNNs, without the need to deal with the irregular connectivity of the mesh structure. We further define a mesh pooling operation such that the receptive field of the network can be expanded while retaining the original vertex set as well as the connections between them. Besides, we introduce a channel-wise self-attention block to learn the individual importance of feature ingredients. Laplacian2Mesh not only decouples the geometry from the irregular connectivity of the mesh structure but also better captures the global features that are central to shape classification and segmentation. Extensive tests on various datasets demonstrate the effectiveness and efficiency of Laplacian2Mesh, particularly in terms of the capability of being vulnerable to noise to fulfill various learning tasks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Geometric deep learning has sparked a rising interest in computer graphics to perform shape understanding tasks, such as shape classification and semantic segmentation. When the input is a polygonal surface, one has to suffer from the irregular mesh structure. Motivated by the geometric spectral theory, we introduce <italic>Laplacian2Mesh</italic>, a novel and flexible convolutional neural network (CNN) framework for coping with irregular triangle meshes (vertices may have any valence). By mapping the input mesh surface to the multi-dimensional Laplacian-Beltrami space, Laplacian2Mesh enables one to perform shape analysis tasks directly using the mature CNNs, without the need to deal with the irregular connectivity of the mesh structure. We further define a mesh pooling operation such that the receptive field of the network can be expanded while retaining the original vertex set as well as the connections between them. Besides, we introduce a channel-wise self-attention block to learn the individual importance of feature ingredients. Laplacian2Mesh not only decouples the geometry from the irregular connectivity of the mesh structure but also better captures the global features that are central to shape classification and segmentation. Extensive tests on various datasets demonstrate the effectiveness and efficiency of Laplacian2Mesh, particularly in terms of the capability of being vulnerable to noise to fulfill various learning tasks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Geometric deep learning has sparked a rising interest in computer graphics to perform shape understanding tasks, such as shape classification and semantic segmentation. When the input is a polygonal surface, one has to suffer from the irregular mesh structure. Motivated by the geometric spectral theory, we introduce Laplacian2Mesh, a novel and flexible convolutional neural network (CNN) framework for coping with irregular triangle meshes (vertices may have any valence). By mapping the input mesh surface to the multi-dimensional Laplacian-Beltrami space, Laplacian2Mesh enables one to perform shape analysis tasks directly using the mature CNNs, without the need to deal with the irregular connectivity of the mesh structure. We further define a mesh pooling operation such that the receptive field of the network can be expanded while retaining the original vertex set as well as the connections between them. Besides, we introduce a channel-wise self-attention block to learn the individual importance of feature ingredients. Laplacian2Mesh not only decouples the geometry from the irregular connectivity of the mesh structure but also better captures the global features that are central to shape classification and segmentation. Extensive tests on various datasets demonstrate the effectiveness and efficiency of Laplacian2Mesh, particularly in terms of the capability of being vulnerable to noise to fulfill various learning tasks.",
"title": "Laplacian2Mesh: Laplacian-Based Mesh Understanding",
"normalizedTitle": "Laplacian2Mesh: Laplacian-Based Mesh Understanding",
"fno": "10076837",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Shape",
"Task Analysis",
"Convolution",
"Laplace Equations",
"Three Dimensional Displays",
"Deep Learning",
"Kernel",
"Geometric Deep Learning",
"Laplacian Pooling",
"Laplacian Beltrami Space",
"Mesh Understanding"
],
"authors": [
{
"givenName": "Qiujie",
"surname": "Dong",
"fullName": "Qiujie Dong",
"affiliation": "School of Computer Science and Technology, Shandong University, Qingdao, Shandong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zixiong",
"surname": "Wang",
"fullName": "Zixiong Wang",
"affiliation": "School of Computer Science and Technology, Shandong University, Qingdao, Shandong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Manyi",
"surname": "Li",
"fullName": "Manyi Li",
"affiliation": "School of Software, Shandong University, Jinan, Shandong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Junjie",
"surname": "Gao",
"fullName": "Junjie Gao",
"affiliation": "School of Computer Science and Technology, Shandong University, Qingdao, Shandong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shuangmin",
"surname": "Chen",
"fullName": "Shuangmin Chen",
"affiliation": "School of Information and Technology, Qingdao University of Science and Technology, Qingdao, Shandong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhenyu",
"surname": "Shu",
"fullName": "Zhenyu Shu",
"affiliation": "School of Computer and Data Engineering, Ningbo Institute of Technology, Zhejiang University, Ningbo, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shiqing",
"surname": "Xin",
"fullName": "Shiqing Xin",
"affiliation": "School of Computer Science and Technology, Shandong University, Qingdao, Shandong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Changhe",
"surname": "Tu",
"fullName": "Changhe Tu",
"affiliation": "School of Computer Science and Technology, Shandong University, Qingdao, Shandong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wenping",
"surname": "Wang",
"fullName": "Wenping Wang",
"affiliation": "Department of Computer Science & Engineering, Texas A&M University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cgiv/2007/2928/0/29280195",
"title": "Mesh Editing in ROI with Dual Laplacian",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2007/29280195/12OmNBRsVy4",
"parentPublication": {
"id": "proceedings/cgiv/2007/2928/0",
"title": "Computer Graphics, Imaging and Visualisation (CGIV 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-cg/2005/2473/0/24730269",
"title": "A Global Laplacian Smoothing Approach with Feature Preservation",
"doi": null,
"abstractUrl": "/proceedings-article/cad-cg/2005/24730269/12OmNBTJIAu",
"parentPublication": {
"id": "proceedings/cad-cg/2005/2473/0",
"title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2016/9919/0/07459193",
"title": "Convolutional Laplacian sparse coding",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2016/07459193/12OmNBajTKp",
"parentPublication": {
"id": "proceedings/ssiai/2016/9919/0",
"title": "2016 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2013/5099/0/5099a179",
"title": "Shape Inflation with an Adapted Laplacian Operator for Hybrid Quad/Triangle Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2013/5099a179/12OmNqGRGaD",
"parentPublication": {
"id": "proceedings/sibgrapi/2013/5099/0",
"title": "2013 XXVI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460450",
"title": "Shape signature using the edge-based Laplacian",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460450/12OmNwudQLH",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpp/2016/2823/0/2823a588",
"title": "Locality-Aware Laplacian Mesh Smoothing",
"doi": null,
"abstractUrl": "/proceedings-article/icpp/2016/2823a588/12OmNynJMIp",
"parentPublication": {
"id": "proceedings/icpp/2016/2823/0",
"title": "2016 45th International Conference on Parallel Processing (ICPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457g584",
"title": "SyncSpecCNN: Synchronized Spectral CNN for 3D Shape Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g584/12OmNyoAA54",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/03/ttg2009030518",
"title": "Quasi-Developable Mesh Surface Interpolation via Mesh Deformation",
"doi": null,
"abstractUrl": "/journal/tg/2009/03/ttg2009030518/13rRUxjQybN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10070611",
"title": "Mesh Neural Networks Based on Dual Graph Pyramids",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10070611/1LvvYkEy8XC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09159927",
"title": "Learning on 3D Meshes With Laplacian Encoding and Pooling",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09159927/1m3m77L2v3a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10076909",
"articleId": "1LFQ6bbu1Wg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10076832",
"articleId": "1LFQ6Ir6DEQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LH8FviYvN6",
"name": "ttg555501-010076837s1-supp1-3259044.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010076837s1-supp1-3259044.pdf",
"extension": "pdf",
"size": "217 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LFQ7mRf54s",
"doi": "10.1109/TVCG.2023.3259141",
"abstract": "Although neural supersampling has achieved great success in various applications for improving image quality, it is still difficult to apply it to a wide range of real-time rendering applications due to the high computational power demand. Most existing methods are computationally expensive and require high-performance hardware, preventing their use on platforms with limited hardware, such as smartphones. To this end, we propose a new supersampling framework for real-time rendering applications to reconstruct a high-quality image out of a low-resolution one, which is sufficiently lightweight to run on smartphones within a real-time budget. Our model takes as input the renderer-generated low resolution content and produces high resolution and anti-aliased results. To maximize sampling efficiency, we propose using an alternate sub-pixel sample pattern during the rasterization process. This allows us to create a relatively small reconstruction model while maintaining high image quality. By accumulating new samples into a high-resolution history buffer, an efficient history check and re-usage scheme is introduced to improve temporal stability. To our knowledge, this is the first research in pushing real-time neural supersampling on mobile devices. Due to the absence of training data, we present a new dataset containing 57 training and test sequences from three game scenes. Furthermore, based on the rendered motion vectors and a visual perception study, we introduce a new metric called inter-frame structural similarity (IF-SSIM) to quantitatively measure the temporal stability of rendered videos. Extensive evaluations demonstrate that our supersampling model outperforms existing or alternative solutions in both performance and temporal stability.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although neural supersampling has achieved great success in various applications for improving image quality, it is still difficult to apply it to a wide range of real-time rendering applications due to the high computational power demand. Most existing methods are computationally expensive and require high-performance hardware, preventing their use on platforms with limited hardware, such as smartphones. To this end, we propose a new supersampling framework for real-time rendering applications to reconstruct a high-quality image out of a low-resolution one, which is sufficiently lightweight to run on smartphones within a real-time budget. Our model takes as input the renderer-generated low resolution content and produces high resolution and anti-aliased results. To maximize sampling efficiency, we propose using an alternate sub-pixel sample pattern during the rasterization process. This allows us to create a relatively small reconstruction model while maintaining high image quality. By accumulating new samples into a high-resolution history buffer, an efficient history check and re-usage scheme is introduced to improve temporal stability. To our knowledge, this is the first research in pushing real-time neural supersampling on mobile devices. Due to the absence of training data, we present a new dataset containing 57 training and test sequences from three game scenes. Furthermore, based on the rendered motion vectors and a visual perception study, we introduce a new metric called inter-frame structural similarity (IF-SSIM) to quantitatively measure the temporal stability of rendered videos. Extensive evaluations demonstrate that our supersampling model outperforms existing or alternative solutions in both performance and temporal stability.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although neural supersampling has achieved great success in various applications for improving image quality, it is still difficult to apply it to a wide range of real-time rendering applications due to the high computational power demand. Most existing methods are computationally expensive and require high-performance hardware, preventing their use on platforms with limited hardware, such as smartphones. To this end, we propose a new supersampling framework for real-time rendering applications to reconstruct a high-quality image out of a low-resolution one, which is sufficiently lightweight to run on smartphones within a real-time budget. Our model takes as input the renderer-generated low resolution content and produces high resolution and anti-aliased results. To maximize sampling efficiency, we propose using an alternate sub-pixel sample pattern during the rasterization process. This allows us to create a relatively small reconstruction model while maintaining high image quality. By accumulating new samples into a high-resolution history buffer, an efficient history check and re-usage scheme is introduced to improve temporal stability. To our knowledge, this is the first research in pushing real-time neural supersampling on mobile devices. Due to the absence of training data, we present a new dataset containing 57 training and test sequences from three game scenes. Furthermore, based on the rendered motion vectors and a visual perception study, we introduce a new metric called inter-frame structural similarity (IF-SSIM) to quantitatively measure the temporal stability of rendered videos. Extensive evaluations demonstrate that our supersampling model outperforms existing or alternative solutions in both performance and temporal stability.",
"title": "MNSS: Neural Supersampling Framework for Real-Time Rendering on Mobile Devices",
"normalizedTitle": "MNSS: Neural Supersampling Framework for Real-Time Rendering on Mobile Devices",
"fno": "10076842",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Real Time Systems",
"Image Reconstruction",
"Image Resolution",
"Videos",
"Artificial Intelligence",
"Neural Networks",
"Deep Learning",
"Neural Supersampling",
"Real Time Rendering"
],
"authors": [
{
"givenName": "Sipeng",
"surname": "Yang",
"fullName": "Sipeng Yang",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, PR China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yunlu",
"surname": "Zhao",
"fullName": "Yunlu Zhao",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, PR China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuzhe",
"surname": "Luo",
"fullName": "Yuzhe Luo",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, PR China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "He",
"surname": "Wang",
"fullName": "He Wang",
"affiliation": "School of Computing, University of Leeds, Leeds, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongyu",
"surname": "Sun",
"fullName": "Hongyu Sun",
"affiliation": "OPPO US Research Center, Bellevue, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chen",
"surname": "Li",
"fullName": "Chen Li",
"affiliation": "OPPO US Research Center, Bellevue, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Binghuang",
"surname": "Cai",
"fullName": "Binghuang Cai",
"affiliation": "OPPO US Research Center, Bellevue, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaogang",
"surname": "Jin",
"fullName": "Xiaogang Jin",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, PR China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cgiv/2004/2178/0/21780056",
"title": "Framework for Real Time Cloud Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2004/21780056/12OmNBqdr7f",
"parentPublication": {
"id": "proceedings/cgiv/2004/2178/0",
"title": "Proceedings. International Conference on Computer Graphics, Imaging and Visualization, 2004. CGIV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2014/6854/0/6854a372",
"title": "Parameterized Rendering for Multiresolution Terrain Structure",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2014/6854a372/12OmNCdBDGL",
"parentPublication": {
"id": "proceedings/icvrv/2014/6854/0",
"title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2016/1552/0/07574730",
"title": "A parallel volume rendering method for massive data",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2016/07574730/12OmNx0A7P1",
"parentPublication": {
"id": "proceedings/icmew/2016/1552/0",
"title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2011/0155/0/06092330",
"title": "CERA-TVR: A framework for interactive high-quality teravoxel volume visualization on standard PCs",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2011/06092330/12OmNxFsmwU",
"parentPublication": {
"id": "proceedings/ldav/2011/0155/0",
"title": "IEEE Symposium on Large Data Analysis and Visualization (LDAV 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2009/4534/0/05559006",
"title": "Enhancing and experiencing spacetime resolution with videos and stills",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2009/05559006/12OmNyXMQ9U",
"parentPublication": {
"id": "proceedings/iccp/2009/4534/0",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1999/5897/0/58970049",
"title": "LOD-Sprite Technique for Accelerated Terrain Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1999/58970049/12OmNynJMIq",
"parentPublication": {
"id": "proceedings/ieee-vis/1999/5897/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/02/07117432",
"title": "Frameless Volume Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2016/02/07117432/13rRUxcbnHd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/08918030",
"title": "Volumetric Isosurface Rendering with Deep Learning-Based Super-Resolution",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/08918030/1fm1QUuzRAI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2019/2605/0/08944381",
"title": "Real-Time Compression of Dynamically Generated Images for Offscreen Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2019/08944381/1grOFDTENry",
"parentPublication": {
"id": "proceedings/ldav/2019/2605/0",
"title": "2019 IEEE 9th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09606538",
"title": "AvatarMe<sup>++</sup>: Facial Shape and BRDF Inference With Photorealistic Rendering-Aware GANs",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09606538/1ymEN8wBXRC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10076837",
"articleId": "1LFQ6yTQbIs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10076909",
"articleId": "1LFQ6bbu1Wg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LFQ6bbu1Wg",
"doi": "10.1109/TVCG.2023.3257840",
"abstract": "We present <italic>MobileSky</italic>, the first automatic method for real-time high-quality sky replacement for mobile AR applications. The primary challenge of this task is how to extract sky regions in camera feed both quickly and accurately. While the problem of sky replacement is not new, previous methods mainly concern extraction quality rather than efficiency, limiting their application to our task. We aim to provide higher quality, both spatially and temporally consistent sky mask maps for all camera frames in real time. To this end, we develop a novel framework that combines a new deep semantic network called <italic>FSNet</italic> with novel post-processing refinement steps. By leveraging IMU data, we also propose new sky-aware constraints such as temporal consistency, position consistency, and color consistency to help refine the weakly classified part of the segmentation output. Experiments show that our method achieves an average of around 30 FPS on off-the-shelf smartphones and outperforms the state-of-the-art sky replacement methods in terms of execution speed and quality. In the meantime, our mask maps appear to be visually more stable across frames. Our fast sky replacement method enables several applications, such as AR advertising, art making, generating fantasy celestial objects, visually learning about weather phenomena, and advanced video-based visual effects. To facilitate future research, we also create a new video dataset containing annotated sky regions with IMU data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present <italic>MobileSky</italic>, the first automatic method for real-time high-quality sky replacement for mobile AR applications. The primary challenge of this task is how to extract sky regions in camera feed both quickly and accurately. While the problem of sky replacement is not new, previous methods mainly concern extraction quality rather than efficiency, limiting their application to our task. We aim to provide higher quality, both spatially and temporally consistent sky mask maps for all camera frames in real time. To this end, we develop a novel framework that combines a new deep semantic network called <italic>FSNet</italic> with novel post-processing refinement steps. By leveraging IMU data, we also propose new sky-aware constraints such as temporal consistency, position consistency, and color consistency to help refine the weakly classified part of the segmentation output. Experiments show that our method achieves an average of around 30 FPS on off-the-shelf smartphones and outperforms the state-of-the-art sky replacement methods in terms of execution speed and quality. In the meantime, our mask maps appear to be visually more stable across frames. Our fast sky replacement method enables several applications, such as AR advertising, art making, generating fantasy celestial objects, visually learning about weather phenomena, and advanced video-based visual effects. To facilitate future research, we also create a new video dataset containing annotated sky regions with IMU data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present MobileSky, the first automatic method for real-time high-quality sky replacement for mobile AR applications. The primary challenge of this task is how to extract sky regions in camera feed both quickly and accurately. While the problem of sky replacement is not new, previous methods mainly concern extraction quality rather than efficiency, limiting their application to our task. We aim to provide higher quality, both spatially and temporally consistent sky mask maps for all camera frames in real time. To this end, we develop a novel framework that combines a new deep semantic network called FSNet with novel post-processing refinement steps. By leveraging IMU data, we also propose new sky-aware constraints such as temporal consistency, position consistency, and color consistency to help refine the weakly classified part of the segmentation output. Experiments show that our method achieves an average of around 30 FPS on off-the-shelf smartphones and outperforms the state-of-the-art sky replacement methods in terms of execution speed and quality. In the meantime, our mask maps appear to be visually more stable across frames. Our fast sky replacement method enables several applications, such as AR advertising, art making, generating fantasy celestial objects, visually learning about weather phenomena, and advanced video-based visual effects. To facilitate future research, we also create a new video dataset containing annotated sky regions with IMU data.",
"title": "MobileSky: Real-Time Sky Replacement for Mobile AR",
"normalizedTitle": "MobileSky: Real-Time Sky Replacement for Mobile AR",
"fno": "10076909",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Real Time Systems",
"Cameras",
"Streaming Media",
"Image Color Analysis",
"Performance Evaluation",
"Motion Segmentation",
"Task Analysis",
"Mobile Augmented Reality",
"Semantic Segmentation",
"Sky Replacement"
],
"authors": [
{
"givenName": "Xinjie",
"surname": "Wang",
"fullName": "Xinjie Wang",
"affiliation": "Department of Computer Science and Technology, Ocean University of China, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qingxuan",
"surname": "Lv",
"fullName": "Qingxuan Lv",
"affiliation": "Department of Computer Science and Technology, Ocean University of China, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guo",
"surname": "Chen",
"fullName": "Guo Chen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jing",
"surname": "Zhang",
"fullName": "Jing Zhang",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhiqiang",
"surname": "Wei",
"fullName": "Zhiqiang Wei",
"affiliation": "Department of Computer Science and Technology, Ocean University of China, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Junyu",
"surname": "Dong",
"fullName": "Junyu Dong",
"affiliation": "Department of Computer Science and Technology, Ocean University of China, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongbo",
"surname": "Fu",
"fullName": "Hongbo Fu",
"affiliation": "City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhipeng",
"surname": "Zhu",
"fullName": "Zhipeng Zhu",
"affiliation": "Guangdong OPPO Mobile Telecommunications Corp., Ltd",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jingxin",
"surname": "Liu",
"fullName": "Jingxin Liu",
"affiliation": "Department of Computer Science and Technology, Ocean University of China, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaogang",
"surname": "Jin",
"fullName": "Xiaogang Jin",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2009/4442/0/05457506",
"title": "Robust 3D street-view reconstruction using sky motion estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457506/12OmNAlNiTP",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460411",
"title": "Daylight spectrum model under weather conditions from clear sky to cloudy",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460411/12OmNqGiu6n",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477637",
"title": "Sky segmentation in the wild: An empirical study",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477637/12OmNqyUUIH",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b734",
"title": "Segmenting Sky Pixels in Images: Analysis and Comparison",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b734/18j8MdG6BYk",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2021/0403/0/09705011",
"title": "Conditional Data Augmentation For Sky Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2021/09705011/1AUpfhlU7G8",
"parentPublication": {
"id": "proceedings/snpd/2021/0403/0",
"title": "2021 IEEE/ACIS 22nd International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlke/2022/9567/0/956700a169",
"title": "Low Light Image Enhancement for Color Images Combined with Sky Region Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/mlke/2022/956700a169/1CY7NPKksJa",
"parentPublication": {
"id": "proceedings/mlke/2022/9567/0",
"title": "2022 International Conference on Machine Learning and Knowledge Engineering (MLKE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2022/7172/0/717200a261",
"title": "Cloud Region Segmentation from All Sky Images using Double K-Means Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2022/717200a261/1KaHOWuko2A",
"parentPublication": {
"id": "proceedings/ism/2022/7172/0",
"title": "2022 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150590",
"title": "Sky Optimization: Semantically aware image processing of skies in low-light photography",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150590/1lPHv7ZkSeQ",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412595",
"title": "Fast Region-Adaptive Defogging and Enhancement for Outdoor Images Containing Sky",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412595/1tmhVKCNMe4",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09662197",
"title": "Real-Time Globally Consistent 3D Reconstruction With Semantic Priors",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09662197/1zzldRFngEo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10076842",
"articleId": "1LFQ7mRf54s",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10077087",
"articleId": "1LFQ7zitdtK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LH8EGdb6b6",
"name": "ttg555501-010076909s1-supp1-3257840.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010076909s1-supp1-3257840.mp4",
"extension": "mp4",
"size": "111 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LFQ7zitdtK",
"doi": "10.1109/TVCG.2023.3259341",
"abstract": "Presenting a predictive model's performance is a communication bottleneck that threatens collaborations between data scientists and subject matter experts. Accuracy and error metrics alone fail to tell the whole story of a model – its risks, strengths, and limitations – making it difficult for subject matter experts to feel confident in their decision to use a model. As a result, models may fail in unexpected ways or go entirely unused, as subject matter experts disregard poorly presented models in favor of familiar, yet arguably substandard methods. In this paper, we describe an iterative study conducted with both subject matter experts and data scientists to understand the gaps in communication between these two groups. We find that, while the two groups share common goals of understanding the data and predictions of the model, friction can stem from unfamiliar terms, metrics, and visualizations – limiting the transfer of knowledge to SMEs and discouraging clarifying questions being asked during presentations. Based on our findings, we derive a set of communication guidelines that use visualization as a common medium for communicating the strengths and weaknesses of a model. We provide a demonstration of our guidelines in a regression modeling scenario and elicit feedback on their use from subject matter experts. From our demonstration, subject matter experts were more comfortable discussing a model's performance, more aware of the trade-offs for the presented model, and better equipped to assess the model's risks – ultimately informing and contextualizing the model's use beyond text and numbers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presenting a predictive model's performance is a communication bottleneck that threatens collaborations between data scientists and subject matter experts. Accuracy and error metrics alone fail to tell the whole story of a model – its risks, strengths, and limitations – making it difficult for subject matter experts to feel confident in their decision to use a model. As a result, models may fail in unexpected ways or go entirely unused, as subject matter experts disregard poorly presented models in favor of familiar, yet arguably substandard methods. In this paper, we describe an iterative study conducted with both subject matter experts and data scientists to understand the gaps in communication between these two groups. We find that, while the two groups share common goals of understanding the data and predictions of the model, friction can stem from unfamiliar terms, metrics, and visualizations – limiting the transfer of knowledge to SMEs and discouraging clarifying questions being asked during presentations. Based on our findings, we derive a set of communication guidelines that use visualization as a common medium for communicating the strengths and weaknesses of a model. We provide a demonstration of our guidelines in a regression modeling scenario and elicit feedback on their use from subject matter experts. From our demonstration, subject matter experts were more comfortable discussing a model's performance, more aware of the trade-offs for the presented model, and better equipped to assess the model's risks – ultimately informing and contextualizing the model's use beyond text and numbers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presenting a predictive model's performance is a communication bottleneck that threatens collaborations between data scientists and subject matter experts. Accuracy and error metrics alone fail to tell the whole story of a model – its risks, strengths, and limitations – making it difficult for subject matter experts to feel confident in their decision to use a model. As a result, models may fail in unexpected ways or go entirely unused, as subject matter experts disregard poorly presented models in favor of familiar, yet arguably substandard methods. In this paper, we describe an iterative study conducted with both subject matter experts and data scientists to understand the gaps in communication between these two groups. We find that, while the two groups share common goals of understanding the data and predictions of the model, friction can stem from unfamiliar terms, metrics, and visualizations – limiting the transfer of knowledge to SMEs and discouraging clarifying questions being asked during presentations. Based on our findings, we derive a set of communication guidelines that use visualization as a common medium for communicating the strengths and weaknesses of a model. We provide a demonstration of our guidelines in a regression modeling scenario and elicit feedback on their use from subject matter experts. From our demonstration, subject matter experts were more comfortable discussing a model's performance, more aware of the trade-offs for the presented model, and better equipped to assess the model's risks – ultimately informing and contextualizing the model's use beyond text and numbers.",
"title": "Are Metrics Enough? Guidelines for Communicating and Visualizing Predictive Models to Subject Matter Experts",
"normalizedTitle": "Are Metrics Enough? Guidelines for Communicating and Visualizing Predictive Models to Subject Matter Experts",
"fno": "10077087",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Models",
"Data Visualization",
"Predictive Models",
"Interviews",
"Data Science",
"Guidelines",
"Stakeholders",
"Visualization Techniques And Methodologies",
"Human Factors",
"Modeling And Prediction",
"Data Communications Aspects"
],
"authors": [
{
"givenName": "Ashley",
"surname": "Suh",
"fullName": "Ashley Suh",
"affiliation": "Tufts University, Medford, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gabriel",
"surname": "Appleby",
"fullName": "Gabriel Appleby",
"affiliation": "Tufts University, Medford, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Erik W.",
"surname": "Anderson",
"fullName": "Erik W. Anderson",
"affiliation": "Novartis Pharmaceuticals Corporation, Data Science and AI, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Luca",
"surname": "Finelli",
"fullName": "Luca Finelli",
"affiliation": "Novartis Pharmaceuticals Corporation, Data Science and AI, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Remco",
"surname": "Chang",
"fullName": "Remco Chang",
"affiliation": "Tufts University, Medford, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dylan",
"surname": "Cashman",
"fullName": "Dylan Cashman",
"affiliation": "Novartis Pharmaceuticals Corporation, Data Science and AI, Switzerland",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2011/468/0/06142742",
"title": "Legitimization of subject matter in a design intensive degree program",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2011/06142742/12OmNBDQbn6",
"parentPublication": {
"id": "proceedings/fie/2011/468/0",
"title": "2011 Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esem/2017/4039/0/4039a211",
"title": "Describing What Experimental Software Engineering Experts Do When They Design Their Experiments - A Qualitative Study",
"doi": null,
"abstractUrl": "/proceedings-article/esem/2017/4039a211/12OmNrHSD6b",
"parentPublication": {
"id": "proceedings/esem/2017/4039/0",
"title": "2017 ACM/IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cec-eee/2008/3340/0/3340a412",
"title": "Business Process Illustration: Supporting Experience-Grounded Validation of New Business Processes by Subject Matter Experts",
"doi": null,
"abstractUrl": "/proceedings-article/cec-eee/2008/3340a412/12OmNvAAtoI",
"parentPublication": {
"id": "proceedings/cec-eee/2008/3340/0",
"title": "10th IEEE Conference on E-Commerce Technology (CEC'08) and the Fifth IEEE Conference on Enterprise Computing, E-Commerce and E-Services (EEE'08)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2001/1417/0/14170069",
"title": "Automatic Knowledge Acquisition from Subject Matter Experts",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2001/14170069/12OmNvlg8n0",
"parentPublication": {
"id": "proceedings/ictai/2001/1417/0",
"title": "Proceedings 13th IEEE International Conference on Tools with Artificial Intelligence. ICTAI 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rew/2017/3488/0/3488a415",
"title": "Towards Requirements Communication and Documentation Guidelines for Agile Teams",
"doi": null,
"abstractUrl": "/proceedings-article/rew/2017/3488a415/12OmNynJML3",
"parentPublication": {
"id": "proceedings/rew/2017/3488/0",
"title": "2017 IEEE 25th International Requirements Engineering Conference Workshops (REW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2013/09/ttk2013091933",
"title": "A Formalism and Method for Representing and Reasoning with Process Models Authored by Subject Matter Experts",
"doi": null,
"abstractUrl": "/journal/tk/2013/09/ttk2013091933/13rRUyuvRpb",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08659283",
"title": "Toward a National Agenda for Broadening Participation of African Americans in Engineering & Computer Science: A Methodological Overview of Phase II",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08659283/18j9fl1cFkQ",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/re/2022/7000/0/700000a255",
"title": "AnnoteREI! A Tool for Transcribing and Annotating Requirements Elicitation Interviews",
"doi": null,
"abstractUrl": "/proceedings-article/re/2022/700000a255/1HBKr4oU8Vi",
"parentPublication": {
"id": "proceedings/re/2022/7000/0",
"title": "2022 IEEE 30th International Requirements Engineering Conference (RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a234",
"title": "Visual Analytic System for Subject Matter Expert Document Tagging using Information Retrieval and Semi-Supervised Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a234/1cMFaI00dyg",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vahc/2021/2067/0/206700a006",
"title": "Communicating Performance of Regression Models Using Visualization in Pharmacovigilance",
"doi": null,
"abstractUrl": "/proceedings-article/vahc/2021/206700a006/1z0ylclGF6E",
"parentPublication": {
"id": "proceedings/vahc/2021/2067/0",
"title": "2021 IEEE Workshop on Visual Analytics in Healthcare (VAHC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10076842",
"articleId": "1LFQ7mRf54s",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10077744",
"articleId": "1LH8EZ3NEGI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LH8GA0ZG5a",
"name": "ttg555501-010077087s1-supp6-3259341.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010077087s1-supp6-3259341.pdf",
"extension": "pdf",
"size": "1.77 MB",
"__typename": "WebExtraType"
},
{
"id": "1LH8Gp97Xi0",
"name": "ttg555501-010077087s1-supp3-3259341.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010077087s1-supp3-3259341.pdf",
"extension": "pdf",
"size": "31.1 kB",
"__typename": "WebExtraType"
},
{
"id": "1LH8GuVDkzu",
"name": "ttg555501-010077087s1-supp2-3259341.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010077087s1-supp2-3259341.pdf",
"extension": "pdf",
"size": "63.7 kB",
"__typename": "WebExtraType"
},
{
"id": "1LH8GrDPlba",
"name": "ttg555501-010077087s1-supp4-3259341.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010077087s1-supp4-3259341.pdf",
"extension": "pdf",
"size": "9.34 MB",
"__typename": "WebExtraType"
},
{
"id": "1LH8GxepyZW",
"name": "ttg555501-010077087s1-supp5-3259341.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010077087s1-supp5-3259341.pdf",
"extension": "pdf",
"size": "403 kB",
"__typename": "WebExtraType"
},
{
"id": "1LH8GCIt0Pe",
"name": "ttg555501-010077087s1-supp1-3259341.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010077087s1-supp1-3259341.pdf",
"extension": "pdf",
"size": "50 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LFQ6PMpeik",
"doi": "10.1109/TVCG.2023.3259382",
"abstract": "Caustics are challenging light transport effects for photo-realistic rendering. Photon mapping techniques play a fundamental role in rendering caustics. However, photon mapping methods render single caustics under the stationary light source in a fixed scene view. They require significant storage and computing resources to produce high-quality results. In this paper, we propose efficiently rendering more diverse caustics of a scene with the camera and the light source moving. We present a novel learning-based volume rendering approach with implicit representations for our proposed task. Considering the variety of materials and textures of planar caustic receivers, we decompose the output appearance into two components: the diffuse and specular parts with a probabilistic module. Unlike NeRF, we construct weights for rendering each component from the implicit signed distance function (SDF). Moreover, we introduce the centering calibration and the sine activation function to improve the performance of the color prediction network. Extensive experiments on the synthetic and real-world datasets illustrate that our method achieves much better performance than baselines in the quantitative and qualitative comparison, for rendering caustics in novel views with the dynamic light source. Especially, our method outperforms the baseline on the temporal consistency across frames. Code will be available at <uri>https://github.com/JiaxiongQ/NeRC</uri>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Caustics are challenging light transport effects for photo-realistic rendering. Photon mapping techniques play a fundamental role in rendering caustics. However, photon mapping methods render single caustics under the stationary light source in a fixed scene view. They require significant storage and computing resources to produce high-quality results. In this paper, we propose efficiently rendering more diverse caustics of a scene with the camera and the light source moving. We present a novel learning-based volume rendering approach with implicit representations for our proposed task. Considering the variety of materials and textures of planar caustic receivers, we decompose the output appearance into two components: the diffuse and specular parts with a probabilistic module. Unlike NeRF, we construct weights for rendering each component from the implicit signed distance function (SDF). Moreover, we introduce the centering calibration and the sine activation function to improve the performance of the color prediction network. Extensive experiments on the synthetic and real-world datasets illustrate that our method achieves much better performance than baselines in the quantitative and qualitative comparison, for rendering caustics in novel views with the dynamic light source. Especially, our method outperforms the baseline on the temporal consistency across frames. Code will be available at <uri>https://github.com/JiaxiongQ/NeRC</uri>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Caustics are challenging light transport effects for photo-realistic rendering. Photon mapping techniques play a fundamental role in rendering caustics. However, photon mapping methods render single caustics under the stationary light source in a fixed scene view. They require significant storage and computing resources to produce high-quality results. In this paper, we propose efficiently rendering more diverse caustics of a scene with the camera and the light source moving. We present a novel learning-based volume rendering approach with implicit representations for our proposed task. Considering the variety of materials and textures of planar caustic receivers, we decompose the output appearance into two components: the diffuse and specular parts with a probabilistic module. Unlike NeRF, we construct weights for rendering each component from the implicit signed distance function (SDF). Moreover, we introduce the centering calibration and the sine activation function to improve the performance of the color prediction network. Extensive experiments on the synthetic and real-world datasets illustrate that our method achieves much better performance than baselines in the quantitative and qualitative comparison, for rendering caustics in novel views with the dynamic light source. Especially, our method outperforms the baseline on the temporal consistency across frames. Code will be available at https://github.com/JiaxiongQ/NeRC.",
"title": "NeRC: Rendering Planar Caustics by Learning Implicit Neural Representations",
"normalizedTitle": "NeRC: Rendering Planar Caustics by Learning Implicit Neural Representations",
"fno": "10077440",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Cameras",
"Light Sources",
"Photonics",
"Lighting",
"Image Color Analysis",
"Task Analysis",
"Caustics",
"Photon Mapping",
"Volume Rendering",
"Centering Calibration",
"Sine Activation"
],
"authors": [
{
"givenName": "Jiaxiong",
"surname": "Qiu",
"fullName": "Jiaxiong Qiu",
"affiliation": "TMCC, College of Computer Science, Nankai University, Tianjin, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ze-Xin",
"surname": "Yin",
"fullName": "Ze-Xin Yin",
"affiliation": "TMCC, College of Computer Science, Nankai University, Tianjin, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ming-Ming",
"surname": "Cheng",
"fullName": "Ming-Ming Cheng",
"affiliation": "TMCC, College of Computer Science, Nankai University, Tianjin, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bo",
"surname": "Ren",
"fullName": "Bo Ren",
"affiliation": "TMCC, College of Computer Science, Nankai University, Tianjin, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-11",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2004/2140/0/21400039",
"title": "A Rapid Rendering Method for Caustics Arising from Refraction by Transparent Objects",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2004/21400039/12OmNvTTc81",
"parentPublication": {
"id": "proceedings/cw/2004/2140/0",
"title": "2004 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sera/2007/2867/0/28670274",
"title": "Caustics Effects with Photo-Realistic Rendering on Movie(?Cars?)",
"doi": null,
"abstractUrl": "/proceedings-article/sera/2007/28670274/12OmNwEJ0WU",
"parentPublication": {
"id": "proceedings/sera/2007/2867/0",
"title": "5th ACIS International Conference on Software Engineering Research, Management & Applications (SERA 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a071",
"title": "Real-Time Volume Caustics with Image-Based Photon Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a071/12OmNy7Qfu0",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07534852",
"title": "Correlated Photon Mapping for Interactive Global Illumination of Time-Varying Volumetric Data",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07534852/13rRUxZ0o1E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/06/mcg2013060058",
"title": "Evaluating Progressive-Rendering Algorithms in Appearance Design Tasks",
"doi": null,
"abstractUrl": "/magazine/cg/2013/06/mcg2013060058/13rRUxjQyjL",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/06/06671591",
"title": "Importance Driven Environment Map Sampling",
"doi": null,
"abstractUrl": "/journal/tg/2014/06/06671591/13rRUxlgxTj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122364",
"title": "Historygrams: Enabling Interactive Global Illumination in Direct Volume Rendering using Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122364/13rRUyYjK5h",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08600345",
"title": "Precomputed Multiple Scattering for Rapid Light Simulation in Participating Media",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08600345/17D45Xh13tH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09123589",
"title": "An Improved Augmented-Reality Framework for Differential Rendering Beyond the Lambertian-World Assumption",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09123589/1kTxwwg0epW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523836",
"title": "Foveated Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10076832",
"articleId": "1LFQ6Ir6DEQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10076842",
"articleId": "1LFQ7mRf54s",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LH8FDTLAWc",
"name": "ttg555501-010077440s1-supp1-3259382.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010077440s1-supp1-3259382.mp4",
"extension": "mp4",
"size": "1.64 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LH8EZ3NEGI",
"doi": "10.1109/TVCG.2023.3260001",
"abstract": "During traditional surgeries, planning and instrument guidance is displayed on an external screen. Recent developments of augmented reality (AR) techniques can overcome obstacles including hand-eye discoordination and heavy mental load. Among these AR technologies, optical see-through (OST) schemes with stereoscopic displays can provide depth perception and retain the physical scene for safety considerations. However, limitations still exist in certain AR systems and the influence of these factors on surgical performance is yet to explore. To this end, experiments of multi-scale surgical tasks were carried out to compare head-mounted display (HMD) AR and autostereoscopic image overlay (AIO) AR, concerning objective performance and subjective evaluation. To solely analyze effects brought by display techniques, the tracking system in each included display system was identical and similar tracking accuracy was proved by a preliminary experiment. Focus and context rendering was utilized to enhance in-situ visualization for surgical guidance. Latency values of all display systems were assessed and a delay experiment proved the latency differences had no significant impact on user performance. Results of multi-scale surgical tasks showed that HMD outperformed in detailed operations probably due to stable resolution along the depth axis, while AIO had better performance in larger-scale operations for better depth perception. This paper helps point out the critical limitations of current OST AR techniques and potentially promotes the progress of AR applications in surgical guidance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "During traditional surgeries, planning and instrument guidance is displayed on an external screen. Recent developments of augmented reality (AR) techniques can overcome obstacles including hand-eye discoordination and heavy mental load. Among these AR technologies, optical see-through (OST) schemes with stereoscopic displays can provide depth perception and retain the physical scene for safety considerations. However, limitations still exist in certain AR systems and the influence of these factors on surgical performance is yet to explore. To this end, experiments of multi-scale surgical tasks were carried out to compare head-mounted display (HMD) AR and autostereoscopic image overlay (AIO) AR, concerning objective performance and subjective evaluation. To solely analyze effects brought by display techniques, the tracking system in each included display system was identical and similar tracking accuracy was proved by a preliminary experiment. Focus and context rendering was utilized to enhance in-situ visualization for surgical guidance. Latency values of all display systems were assessed and a delay experiment proved the latency differences had no significant impact on user performance. Results of multi-scale surgical tasks showed that HMD outperformed in detailed operations probably due to stable resolution along the depth axis, while AIO had better performance in larger-scale operations for better depth perception. This paper helps point out the critical limitations of current OST AR techniques and potentially promotes the progress of AR applications in surgical guidance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "During traditional surgeries, planning and instrument guidance is displayed on an external screen. Recent developments of augmented reality (AR) techniques can overcome obstacles including hand-eye discoordination and heavy mental load. Among these AR technologies, optical see-through (OST) schemes with stereoscopic displays can provide depth perception and retain the physical scene for safety considerations. However, limitations still exist in certain AR systems and the influence of these factors on surgical performance is yet to explore. To this end, experiments of multi-scale surgical tasks were carried out to compare head-mounted display (HMD) AR and autostereoscopic image overlay (AIO) AR, concerning objective performance and subjective evaluation. To solely analyze effects brought by display techniques, the tracking system in each included display system was identical and similar tracking accuracy was proved by a preliminary experiment. Focus and context rendering was utilized to enhance in-situ visualization for surgical guidance. Latency values of all display systems were assessed and a delay experiment proved the latency differences had no significant impact on user performance. Results of multi-scale surgical tasks showed that HMD outperformed in detailed operations probably due to stable resolution along the depth axis, while AIO had better performance in larger-scale operations for better depth perception. This paper helps point out the critical limitations of current OST AR techniques and potentially promotes the progress of AR applications in surgical guidance.",
"title": "A Comparative Evaluation of Optical See-through Augmented Reality in Surgical Guidance",
"normalizedTitle": "A Comparative Evaluation of Optical See-through Augmented Reality in Surgical Guidance",
"fno": "10077744",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Surgery",
"Resists",
"Navigation",
"Task Analysis",
"Optical Imaging",
"Display Systems",
"Biomedical Optical Imaging",
"Augmented Reality",
"Autostereoscopic Image Overlay",
"Head Mounted Display",
"Optical See Through",
"Surgical Guidance"
],
"authors": [
{
"givenName": "Ruiyang",
"surname": "Li",
"fullName": "Ruiyang Li",
"affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Boxuan",
"surname": "Han",
"fullName": "Boxuan Han",
"affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haowei",
"surname": "Li",
"fullName": "Haowei Li",
"affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Longfei",
"surname": "Ma",
"fullName": "Longfei Ma",
"affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xinran",
"surname": "Zhang",
"fullName": "Xinran Zhang",
"affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhe",
"surname": "Zhao",
"fullName": "Zhe Zhao",
"affiliation": "Department of Orthopaedics, Tsinghua University, Beijing Tsinghua Changgung Hospital. School of Clinical Medicine, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongen",
"surname": "Liao",
"fullName": "Hongen Liao",
"affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a043",
"title": "Simultaneous Direct and Augmented View Distortion Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a043/12OmNC1oT64",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a052",
"title": "[POSTER] Hybrid Video/Optical See-Through HMD",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a052/12OmNy4r3Ph",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2012/2049/0/06266298",
"title": "Inexpensive monocular pico-projector-based augmented reality display for surgical microscope",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2012/06266298/12OmNzvhvy0",
"parentPublication": {
"id": "proceedings/cbms/2012/2049/0",
"title": "2012 25th IEEE International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/var4good/2018/5977/0/08576884",
"title": "Augmented Visual Instruction for Surgical Practice and Training",
"doi": null,
"abstractUrl": "/proceedings-article/var4good/2018/08576884/17D45WODasn",
"parentPublication": {
"id": "proceedings/var4good/2018/5977/0",
"title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a402",
"title": "AR-Assisted Surgical Guidance System for Ventriculostomy",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a402/1CJdTYykk5W",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797682",
"title": "Interactive and Multimodal-based Augmented Reality for Remote Assistance using a Digital Surgical Microscope",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797682/1cJ12jTP75S",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998139",
"title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a080",
"title": "Can Retinal Projection Displays Improve Spatial Perception in Augmented Reality?",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a080/1pysvYTZF6w",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a454",
"title": "Augmented Reality based Surgical Navigation for Percutaneous Endoscopic Transforaminal Discectomy",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a454/1tnWxe3BhxS",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a456",
"title": "Augmented Reality based Surgical Navigation for Percutaneous Endoscopic Transforaminal Discectomy",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a456/1tnXaPRVToI",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10077087",
"articleId": "1LFQ7zitdtK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10081495",
"articleId": "1LRbQCd2D7O",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LINp9BoKBy",
"name": "ttg555501-010077744s1-supp1-3260001.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010077744s1-supp1-3260001.mp4",
"extension": "mp4",
"size": "21.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LAuCOR3RE4",
"doi": "10.1109/TVCG.2023.3258693",
"abstract": "Head tracking is commonly used in VR applications to allow users to naturally view 3D content using physical head movement, but many applications also support turning with hand-held controllers. Controller and joystick controls are convenient for practical settings where full 360-degree physical rotation is not possible, such as when the user is sitting at a desk. Though controller-based rotation provides the benefit of convenience, previous research has demonstrated that virtual or joystick-controlled view rotation to have drawbacks of sickness and disorientation compared to physical turning. To combat such issues, researchers have considered various techniques such as speed adjustments or reduced field of view, but data is limited on how different variations for joystick rotation influences sickness and orientation perception. Our studies include different variations of techniques such as joystick rotation, resetting, and field-of-view reduction. We investigate trade-offs among different techniques in terms of sickness and the ability to maintain spatial orientation. In two controlled experiments, participants traveled through a sequence of rooms and were tested on spatial orientation, and we also collected subjective measures of sickness and preference. Our findings indicate a preference by users towards directly-manipulated joystick-based rotations compared to user-initiated resetting and minimal effects of technique on spatial awareness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Head tracking is commonly used in VR applications to allow users to naturally view 3D content using physical head movement, but many applications also support turning with hand-held controllers. Controller and joystick controls are convenient for practical settings where full 360-degree physical rotation is not possible, such as when the user is sitting at a desk. Though controller-based rotation provides the benefit of convenience, previous research has demonstrated that virtual or joystick-controlled view rotation to have drawbacks of sickness and disorientation compared to physical turning. To combat such issues, researchers have considered various techniques such as speed adjustments or reduced field of view, but data is limited on how different variations for joystick rotation influences sickness and orientation perception. Our studies include different variations of techniques such as joystick rotation, resetting, and field-of-view reduction. We investigate trade-offs among different techniques in terms of sickness and the ability to maintain spatial orientation. In two controlled experiments, participants traveled through a sequence of rooms and were tested on spatial orientation, and we also collected subjective measures of sickness and preference. Our findings indicate a preference by users towards directly-manipulated joystick-based rotations compared to user-initiated resetting and minimal effects of technique on spatial awareness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Head tracking is commonly used in VR applications to allow users to naturally view 3D content using physical head movement, but many applications also support turning with hand-held controllers. Controller and joystick controls are convenient for practical settings where full 360-degree physical rotation is not possible, such as when the user is sitting at a desk. Though controller-based rotation provides the benefit of convenience, previous research has demonstrated that virtual or joystick-controlled view rotation to have drawbacks of sickness and disorientation compared to physical turning. To combat such issues, researchers have considered various techniques such as speed adjustments or reduced field of view, but data is limited on how different variations for joystick rotation influences sickness and orientation perception. Our studies include different variations of techniques such as joystick rotation, resetting, and field-of-view reduction. We investigate trade-offs among different techniques in terms of sickness and the ability to maintain spatial orientation. In two controlled experiments, participants traveled through a sequence of rooms and were tested on spatial orientation, and we also collected subjective measures of sickness and preference. Our findings indicate a preference by users towards directly-manipulated joystick-based rotations compared to user-initiated resetting and minimal effects of technique on spatial awareness.",
"title": "An Evaluation of View Rotation Techniques for Seated Navigation in Virtual Reality",
"normalizedTitle": "An Evaluation of View Rotation Techniques for Seated Navigation in Virtual Reality",
"fno": "10075482",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Turning",
"Tracking",
"Teleportation",
"Virtual Environments",
"Navigation",
"Three Dimensional Displays",
"Human Centered Computing",
"Human Computer Interaction",
"Virtual Reality"
],
"authors": [
{
"givenName": "Brett",
"surname": "Benda",
"fullName": "Brett Benda",
"affiliation": "University of Florida, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shyam Prathish",
"surname": "Sargunam",
"fullName": "Shyam Prathish Sargunam",
"affiliation": "Autodesk, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mahsan",
"surname": "Nourani",
"fullName": "Mahsan Nourani",
"affiliation": "University of Florida, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eric D.",
"surname": "Ragan",
"fullName": "Eric D. Ragan",
"affiliation": "University of Florida, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2014/3624/0/06798852",
"title": "Reorientation in virtual environments using interactive portals",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798852/12OmNqBbHVR",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892227",
"title": "Guided head rotation and amplified head rotation: Evaluating semi-natural travel and viewing techniques in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892227/12OmNwseEYz",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07384536",
"title": "Examining Rotation Gain in CAVE-like Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07384536/13rRUxOdD2H",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09733261",
"title": "One-step out-of-place resetting for redirected walking in VR",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09733261/1BENJyPkx5S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09737429",
"title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798286",
"title": "Evaluating the Effectiveness of Redirected Walking with Auditory Distractors for Navigation in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798286/1cJ0PIoIPV6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797994",
"title": "Redirecting View Rotation in Immersive Movies with Washout Filters",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797994/1cJ19tjOG2s",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090536",
"title": "Elastic-Move: Passive Haptic Device with Force Feedback for Virtual Reality Locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090536/1jIxqFQXvSE",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a627",
"title": "The Cognitive Load and Usability of Three Walking Metaphors for Consumer Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a627/1pysyecdlzq",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a321",
"title": "Simultaneous Real Walking and Asymmetric Input in Virtual Reality with a Smartphone-based Hybrid Interface",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a321/1yeQEyk3fbO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10070611",
"articleId": "1LvvYkEy8XC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10076255",
"articleId": "1LAuCZbIwdG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LH8FUSRpvy",
"name": "ttg555501-010075482s1-supp1-3258693.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010075482s1-supp1-3258693.mp4",
"extension": "mp4",
"size": "69.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LAuCZbIwdG",
"doi": "10.1109/TVCG.2023.3258440",
"abstract": "Wildfires affect many regions across the world. The accelerated progression of global warming has amplified their frequency and scale, deepening their impact on human life, the economy, and the environment. The temperature rise has been driving wildfires to behave unpredictably compared to those previously observed, challenging researchers and fire management agencies to understand the factors behind this behavioral change. Furthermore, this change has rendered fire personnel training outdated and lost its ability to adequately prepare personnel to respond to these new fires. Immersive visualization can play a key role in tackling the growing issue of wildfires. Therefore, this survey reviews various studies that use immersive and non-immersive data visualization techniques to depict wildfire behavior and train first responders and planners. This paper identifies the most useful characteristics of these systems. While these studies support knowledge creation for certain situations, there is still scope to comprehensively improve immersive systems to address the unforeseen dynamics of wildfires.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Wildfires affect many regions across the world. The accelerated progression of global warming has amplified their frequency and scale, deepening their impact on human life, the economy, and the environment. The temperature rise has been driving wildfires to behave unpredictably compared to those previously observed, challenging researchers and fire management agencies to understand the factors behind this behavioral change. Furthermore, this change has rendered fire personnel training outdated and lost its ability to adequately prepare personnel to respond to these new fires. Immersive visualization can play a key role in tackling the growing issue of wildfires. Therefore, this survey reviews various studies that use immersive and non-immersive data visualization techniques to depict wildfire behavior and train first responders and planners. This paper identifies the most useful characteristics of these systems. While these studies support knowledge creation for certain situations, there is still scope to comprehensively improve immersive systems to address the unforeseen dynamics of wildfires.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Wildfires affect many regions across the world. The accelerated progression of global warming has amplified their frequency and scale, deepening their impact on human life, the economy, and the environment. The temperature rise has been driving wildfires to behave unpredictably compared to those previously observed, challenging researchers and fire management agencies to understand the factors behind this behavioral change. Furthermore, this change has rendered fire personnel training outdated and lost its ability to adequately prepare personnel to respond to these new fires. Immersive visualization can play a key role in tackling the growing issue of wildfires. Therefore, this survey reviews various studies that use immersive and non-immersive data visualization techniques to depict wildfire behavior and train first responders and planners. This paper identifies the most useful characteristics of these systems. While these studies support knowledge creation for certain situations, there is still scope to comprehensively improve immersive systems to address the unforeseen dynamics of wildfires.",
"title": "Analysis of Wildfire Visualization Systems for Research and Training: Are They up for the Challenge of the Current State of Wildfires?",
"normalizedTitle": "Analysis of Wildfire Visualization Systems for Research and Training: Are They up for the Challenge of the Current State of Wildfires?",
"fno": "10076255",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Behavioral Sciences",
"Training",
"Personnel",
"Australia",
"Mathematical Models",
"Software",
"Immersive Wildfire Training",
"Immersive Wildfire Visualization",
"Modelling",
"Simulation",
"Wildfire",
"Wildfire Visualization"
],
"authors": [
{
"givenName": "Carlos A. Tirado",
"surname": "Cortes",
"fullName": "Carlos A. Tirado Cortes",
"affiliation": "iCinema Research Centre, The University of New South Wales, Kensington, NSW, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Susanne",
"surname": "Thurow",
"fullName": "Susanne Thurow",
"affiliation": "iCinema Research Centre, The University of New South Wales, Kensington, NSW, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alex",
"surname": "Ong",
"fullName": "Alex Ong",
"affiliation": "iCinema Research Centre, The University of New South Wales, Kensington, NSW, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jason J.",
"surname": "Sharples",
"fullName": "Jason J. Sharples",
"affiliation": "School of Science, The University of New South Wales, Canberra, ACT, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tomasz",
"surname": "Bednarz",
"fullName": "Tomasz Bednarz",
"affiliation": "NVIDIA Corporation, Sydney, NSW, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Grant",
"surname": "Stevens",
"fullName": "Grant Stevens",
"affiliation": "iCinema Research Centre, The University of New South Wales, Kensington, NSW, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dennis Del",
"surname": "Favero",
"fullName": "Dennis Del Favero",
"affiliation": "iCinema Research Centre, The University of New South Wales, Kensington, NSW, Australia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-20",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/1997/8262/0/82620451",
"title": "Wildfire Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620451/12OmNqBKTYZ",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2007/0905/0/04161033",
"title": "VRFire: an Immersive Visualization Experience for Wildfire Spread Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161033/12OmNxX3uFj",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2014/7394/0/7394a046",
"title": "Multisensor Data Fusion for Wildfire Warning",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2014/7394a046/12OmNxdDFIr",
"parentPublication": {
"id": "proceedings/msn/2014/7394/0",
"title": "2014 10th International Conference on Mobile Ad-hoc and Sensor Networks (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480791",
"title": "VFire: Virtual Fire in Realistic Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480791/12OmNy4IEXY",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671680",
"title": "Wildfire Occurrence Prediction Using Time Series Classification: A Comparative Study",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671680/1A8jf7dgTio",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmss/2022/9685/0/968500a087",
"title": "Supplier Partner Dependency from the Perspective of Social Exchange: The Moderating Effect of Partner Power",
"doi": null,
"abstractUrl": "/proceedings-article/icmss/2022/968500a087/1F8z6HlWQRW",
"parentPublication": {
"id": "proceedings/icmss/2022/9685/0",
"title": "2022 International Conference on Management Engineering, Software Engineering and Service Sciences (ICMSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ickg/2022/5101/0/510100a365",
"title": "Multi-time Predictions of Wildfire Grid Map using Remote Sensing Local Data",
"doi": null,
"abstractUrl": "/proceedings-article/ickg/2022/510100a365/1KxU1X7KJCU",
"parentPublication": {
"id": "proceedings/ickg/2022/5101/0",
"title": "2022 IEEE International Conference on Knowledge Graph (ICKG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icict/2020/7283/0/728300a031",
"title": "Data-Enabled Correlation Analysis between Wildfire and Climate using GIS",
"doi": null,
"abstractUrl": "/proceedings-article/icict/2020/728300a031/1jPb5rrNsJy",
"parentPublication": {
"id": "proceedings/icict/2020/7283/0",
"title": "2020 3rd International Conference on Information and Computer Technologies (ICICT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wowmom/2020/7374/0/737400a335",
"title": "Wildfire Spread Modeling with Aerial Image Processing",
"doi": null,
"abstractUrl": "/proceedings-article/wowmom/2020/737400a335/1nMQE8eDmNy",
"parentPublication": {
"id": "proceedings/wowmom/2020/7374/0",
"title": "2020 IEEE 21st International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cds/2021/0428/0/042800a562",
"title": "Predictive Modeling of Wildfires in the United States",
"doi": null,
"abstractUrl": "/proceedings-article/cds/2021/042800a562/1uZxu58EWac",
"parentPublication": {
"id": "proceedings/cds/2021/0428/0",
"title": "2021 2nd International Conference on Computing and Data Science (CDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10075482",
"articleId": "1LAuCOR3RE4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10076909",
"articleId": "1LFQ6bbu1Wg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LH8GeZg3EQ",
"name": "ttg555501-010076255s1-supp1-3258440.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010076255s1-supp1-3258440.pdf",
"extension": "pdf",
"size": "11.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LvvYkEy8XC",
"doi": "10.1109/TVCG.2023.3257035",
"abstract": "Deep neural networks (DNNs) have been widely used for mesh processing in recent years. However, current DNNs can not process arbitrary meshes efficiently. On the one hand, most DNNs expect 2-manifold, watertight meshes, but many meshes, whether manually designed or automatically generated, may have gaps, non-manifold geometry, or other defects. On the other hand, the irregular structure of meshes also brings challenges to building hierarchical structures and aggregating local geometric information, which is critical to conduct DNNs. In this paper, we present DGNet, an efficient, effective and generic deep neural mesh processing network based on dual graph pyramids; it can handle arbitrary meshes. Firstly, we construct dual graph pyramids for meshes to guide feature propagation between hierarchical levels for both downsampling and upsampling. Secondly, we propose a novel convolution to aggregate local features on the proposed hierarchical graphs. By utilizing both geodesic neighbors and Euclidean neighbors, the network enables feature aggregation both within local surface patches and between isolated mesh components. Experimental results demonstrate that DGNet can be applied to both shape analysis and large-scale scene understanding. Furthermore, it achieves superior performance on various benchmarks, including ShapeNetCore, HumanBody, ScanNet and Matterport3D. Code and models will be available at <uri>https://github.com/li-xl/DGNet.</uri>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Deep neural networks (DNNs) have been widely used for mesh processing in recent years. However, current DNNs can not process arbitrary meshes efficiently. On the one hand, most DNNs expect 2-manifold, watertight meshes, but many meshes, whether manually designed or automatically generated, may have gaps, non-manifold geometry, or other defects. On the other hand, the irregular structure of meshes also brings challenges to building hierarchical structures and aggregating local geometric information, which is critical to conduct DNNs. In this paper, we present DGNet, an efficient, effective and generic deep neural mesh processing network based on dual graph pyramids; it can handle arbitrary meshes. Firstly, we construct dual graph pyramids for meshes to guide feature propagation between hierarchical levels for both downsampling and upsampling. Secondly, we propose a novel convolution to aggregate local features on the proposed hierarchical graphs. By utilizing both geodesic neighbors and Euclidean neighbors, the network enables feature aggregation both within local surface patches and between isolated mesh components. Experimental results demonstrate that DGNet can be applied to both shape analysis and large-scale scene understanding. Furthermore, it achieves superior performance on various benchmarks, including ShapeNetCore, HumanBody, ScanNet and Matterport3D. Code and models will be available at <uri>https://github.com/li-xl/DGNet.</uri>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Deep neural networks (DNNs) have been widely used for mesh processing in recent years. However, current DNNs can not process arbitrary meshes efficiently. On the one hand, most DNNs expect 2-manifold, watertight meshes, but many meshes, whether manually designed or automatically generated, may have gaps, non-manifold geometry, or other defects. On the other hand, the irregular structure of meshes also brings challenges to building hierarchical structures and aggregating local geometric information, which is critical to conduct DNNs. In this paper, we present DGNet, an efficient, effective and generic deep neural mesh processing network based on dual graph pyramids; it can handle arbitrary meshes. Firstly, we construct dual graph pyramids for meshes to guide feature propagation between hierarchical levels for both downsampling and upsampling. Secondly, we propose a novel convolution to aggregate local features on the proposed hierarchical graphs. By utilizing both geodesic neighbors and Euclidean neighbors, the network enables feature aggregation both within local surface patches and between isolated mesh components. Experimental results demonstrate that DGNet can be applied to both shape analysis and large-scale scene understanding. Furthermore, it achieves superior performance on various benchmarks, including ShapeNetCore, HumanBody, ScanNet and Matterport3D. Code and models will be available at https://github.com/li-xl/DGNet.",
"title": "Mesh Neural Networks Based on Dual Graph Pyramids",
"normalizedTitle": "Mesh Neural Networks Based on Dual Graph Pyramids",
"fno": "10070611",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Neural Networks",
"Convolution",
"Three Dimensional Displays",
"Feature Extraction",
"Faces",
"Shape",
"Point Cloud Compression",
"Geometric Understanding",
"Mesh Processing",
"Neural Networks",
"Shape Analysis"
],
"authors": [
{
"givenName": "Xiang-Li",
"surname": "Li",
"fullName": "Xiang-Li Li",
"affiliation": "BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zheng-Ning",
"surname": "Liu",
"fullName": "Zheng-Ning Liu",
"affiliation": "Fitten Tech Co., Ltd., Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tuo",
"surname": "Chen",
"fullName": "Tuo Chen",
"affiliation": "BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tai-Jiang",
"surname": "Mu",
"fullName": "Tai-Jiang Mu",
"affiliation": "BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ralph R.",
"surname": "Martin",
"fullName": "Ralph R. Martin",
"affiliation": "School of Computer Science and Informatics, Cardiff University, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shi-Min",
"surname": "Hu",
"fullName": "Shi-Min Hu",
"affiliation": "BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cad-cg/2005/2473/0/24730359",
"title": "Dual Graph of a Mesh Partition for Interactive Analysis of Huge Digital Mockups",
"doi": null,
"abstractUrl": "/proceedings-article/cad-cg/2005/24730359/12OmNAR1aSe",
"parentPublication": {
"id": "proceedings/cad-cg/2005/2473/0",
"title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6275/0/00577171",
"title": "Dual graph contraction for irregular pyramids",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00577171/12OmNvT2p5c",
"parentPublication": {
"id": "proceedings/icpr/1994/6275/0",
"title": "12th IAPR International Conference on Pattern Recognition, 1994",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2000/0868/0/08680235",
"title": "Efficient Coding of Non-Triangular Mesh Connectivity",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2000/08680235/12OmNwdtwaQ",
"parentPublication": {
"id": "proceedings/pg/2000/0868/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8562",
"title": "Neural Mesh Simplification",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8562/1H1ioE9MrPG",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09978684",
"title": "Adaptively Isotropic Remeshing based on Curvature Smoothed Field",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09978684/1IXUnEM2oc8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10024001",
"title": "AGConv: Adaptive Graph Convolution on 3D Point Clouds",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10024001/1K9spf0w0Ug",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10086697",
"title": "Unsupervised Point Cloud Representation Learning with Deep Neural Networks: A Survey",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10086697/1LUpwIzKbgQ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08764385",
"title": "Feature-Preserving Tensor Voting Model for Mesh Steganalysis",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08764385/1bIeIXheQVy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/08/09296808",
"title": "Mesh Denoising With Facet Graph Convolutions",
"doi": null,
"abstractUrl": "/journal/tg/2022/08/09296808/1pDnJLfMBWg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a289",
"title": "Dual Mesh Convolutional Networks for Human Shape Correspondence",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a289/1zWEbXFtOlq",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10066837",
"articleId": "1LtR7JYxVEk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10075482",
"articleId": "1LAuCOR3RE4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LtR7JYxVEk",
"doi": "10.1109/TVCG.2023.3255991",
"abstract": "This study aims to allow users to perform dexterous hand manipulation of objects in virtual environments with hand-held VR controllers. To this end, the VR controller is mapped to the virtual hand and the hand motions are dynamically synthesized when the virtual hand approaches an object. At each frame, given the information about the virtual hand, VR controller input, and hand-object spatial relations, the deep neural network determines the desired joint orientations of the virtual hand model in the next frame. The desired orientations are then converted into a set of torques acting on hand joints and applied to a physics simulation to determine the hand pose at the next frame. The deep neural network, named VR-HandNet, is trained with a reinforcement learning-based approach. Therefore, it can produce physically plausible hand motion since the trial-and-error training process can learn how the interaction between hand and object is performed under the environment that is simulated by a physics engine. Furthermore, we adopted an imitation learning paradigm to increase visual plausibility by mimicking the reference motion datasets. Through the ablation studies, we validated the proposed method is effectively constructed and successfully serves our design goal. A live demo is demonstrated in the supplementary video.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This study aims to allow users to perform dexterous hand manipulation of objects in virtual environments with hand-held VR controllers. To this end, the VR controller is mapped to the virtual hand and the hand motions are dynamically synthesized when the virtual hand approaches an object. At each frame, given the information about the virtual hand, VR controller input, and hand-object spatial relations, the deep neural network determines the desired joint orientations of the virtual hand model in the next frame. The desired orientations are then converted into a set of torques acting on hand joints and applied to a physics simulation to determine the hand pose at the next frame. The deep neural network, named VR-HandNet, is trained with a reinforcement learning-based approach. Therefore, it can produce physically plausible hand motion since the trial-and-error training process can learn how the interaction between hand and object is performed under the environment that is simulated by a physics engine. Furthermore, we adopted an imitation learning paradigm to increase visual plausibility by mimicking the reference motion datasets. Through the ablation studies, we validated the proposed method is effectively constructed and successfully serves our design goal. A live demo is demonstrated in the supplementary video.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This study aims to allow users to perform dexterous hand manipulation of objects in virtual environments with hand-held VR controllers. To this end, the VR controller is mapped to the virtual hand and the hand motions are dynamically synthesized when the virtual hand approaches an object. At each frame, given the information about the virtual hand, VR controller input, and hand-object spatial relations, the deep neural network determines the desired joint orientations of the virtual hand model in the next frame. The desired orientations are then converted into a set of torques acting on hand joints and applied to a physics simulation to determine the hand pose at the next frame. The deep neural network, named VR-HandNet, is trained with a reinforcement learning-based approach. Therefore, it can produce physically plausible hand motion since the trial-and-error training process can learn how the interaction between hand and object is performed under the environment that is simulated by a physics engine. Furthermore, we adopted an imitation learning paradigm to increase visual plausibility by mimicking the reference motion datasets. Through the ablation studies, we validated the proposed method is effectively constructed and successfully serves our design goal. A live demo is demonstrated in the supplementary video.",
"title": "VR-HandNet: A Visually and Physically Plausible Hand Manipulation System in Virtual Reality",
"normalizedTitle": "VR-HandNet: A Visually and Physically Plausible Hand Manipulation System in Virtual Reality",
"fno": "10066837",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Tracking",
"Three Dimensional Displays",
"Shape",
"Physics",
"Grasping",
"Deep Learning",
"Visualization",
"Hand Manipulation",
"Physics Based Animation",
"Reinforcement Learning",
"Virtual Reality"
],
"authors": [
{
"givenName": "DongHeun",
"surname": "Han",
"fullName": "DongHeun Han",
"affiliation": "IIIXR LAB at the Department of Software Convergence, Kyung Hee University, Yongin, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "RoUn",
"surname": "Lee",
"fullName": "RoUn Lee",
"affiliation": "IIIXR LAB at the Department of Software Convergence, Kyung Hee University, Yongin, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "KyeongMin",
"surname": "Kim",
"fullName": "KyeongMin Kim",
"affiliation": "IIIXR LAB at the Department of Software Convergence, Kyung Hee University, Yongin, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "HyeongYeop",
"surname": "Kang",
"fullName": "HyeongYeop Kang",
"affiliation": "IIIXR LAB at the Department of Software Convergence, Kyung Hee University, Yongin, South Korea",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2011/0039/0/05759430",
"title": "A soft hand model for physically-based manipulation of virtual objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759430/12OmNBpEeRU",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571178",
"title": "EMG Biofeedback Based VR System for Hand Rotation and Grasping Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571178/12OmNxRnvUd",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a493",
"title": "Eye Tracking-based LSTM for Locomotion Prediction in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a493/1CJcrKWnUtO",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0545",
"title": "D-Grasp: Physically Plausible Dynamic Grasp Synthesis for Hand-Object Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0545/1H0Nqno8Tw4",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a566",
"title": "A cup of coffee in Mixed Reality: analysis of movements' smoothness from real to virtual",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a566/1J7Waw7xSy4",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a905",
"title": "Haptics in VR Using Origami-Augmented Drones",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a905/1J7WrPcWIVO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10023999",
"title": "VR Blowing: A Physically Plausible Interaction Method for Blowing Air in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10023999/1K9ssyL8VvG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049658",
"title": "Comparing Different Grasping Visualizations for Object Manipulation in VR using Controllers",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049658/1KYotjCVD7W",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049645",
"title": "GestureSurface: VR Sketching through Assembling Scaffold Surface with Non-Dominant Hand",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049645/1KYoyLX55fy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a243",
"title": "Continuous VR Weight Illusion by Combining Adaptive Trigger Resistance and Control-Display Ratio Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a243/1MNgyZ3pLFe",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10068257",
"articleId": "1LtR7CeyeHe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10070611",
"articleId": "1LvvYkEy8XC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LvvXRrzl60",
"name": "ttg555501-010066837s1-supp1-3255991.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010066837s1-supp1-3255991.mp4",
"extension": "mp4",
"size": "53.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LtR7CeyeHe",
"doi": "10.1109/TVCG.2023.3256376",
"abstract": "Multivariate datasets with many variables are increasingly common in many application areas. Most methods approach multivariate data from a singular perspective. Subspace analysis techniques, on the other hand. provide the user a set of subspaces which can be used to view the data from multiple perspectives. However, many subspace analysis methods produce a huge amount of subspaces, a number of which are usually redundant. The enormity of the number of subspaces can be overwhelming to analysts, making it difficult for them to find informative patterns in the data. In this paper, we propose a new paradigm that constructs <italic>semantically consistent</italic> subspaces. These subspaces can then be expanded into more general subspaces by ways of conventional techniques. Our framework uses the labels/meta-data of a dataset to learn the semantic meanings and associations of the attributes. We employ a neural network to learn a semantic word embedding of the attributes and then divide this attribute space into semantically consistent subspaces. The user is provided with a visual analytics interface that guides the analysis process. We show via various examples that these <italic>semantic subspaces</italic> can help organize the data and guide the user in finding interesting patterns in the dataset.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Multivariate datasets with many variables are increasingly common in many application areas. Most methods approach multivariate data from a singular perspective. Subspace analysis techniques, on the other hand. provide the user a set of subspaces which can be used to view the data from multiple perspectives. However, many subspace analysis methods produce a huge amount of subspaces, a number of which are usually redundant. The enormity of the number of subspaces can be overwhelming to analysts, making it difficult for them to find informative patterns in the data. In this paper, we propose a new paradigm that constructs <italic>semantically consistent</italic> subspaces. These subspaces can then be expanded into more general subspaces by ways of conventional techniques. Our framework uses the labels/meta-data of a dataset to learn the semantic meanings and associations of the attributes. We employ a neural network to learn a semantic word embedding of the attributes and then divide this attribute space into semantically consistent subspaces. The user is provided with a visual analytics interface that guides the analysis process. We show via various examples that these <italic>semantic subspaces</italic> can help organize the data and guide the user in finding interesting patterns in the dataset.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Multivariate datasets with many variables are increasingly common in many application areas. Most methods approach multivariate data from a singular perspective. Subspace analysis techniques, on the other hand. provide the user a set of subspaces which can be used to view the data from multiple perspectives. However, many subspace analysis methods produce a huge amount of subspaces, a number of which are usually redundant. The enormity of the number of subspaces can be overwhelming to analysts, making it difficult for them to find informative patterns in the data. In this paper, we propose a new paradigm that constructs semantically consistent subspaces. These subspaces can then be expanded into more general subspaces by ways of conventional techniques. Our framework uses the labels/meta-data of a dataset to learn the semantic meanings and associations of the attributes. We employ a neural network to learn a semantic word embedding of the attributes and then divide this attribute space into semantically consistent subspaces. The user is provided with a visual analytics interface that guides the analysis process. We show via various examples that these semantic subspaces can help organize the data and guide the user in finding interesting patterns in the dataset.",
"title": "Interactive Subspace Cluster Analysis Guided by Semantic Attribute Associations",
"normalizedTitle": "Interactive Subspace Cluster Analysis Guided by Semantic Attribute Associations",
"fno": "10068257",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Semantics",
"Visual Analytics",
"Data Visualization",
"Three Dimensional Displays",
"Task Analysis",
"Standards",
"Space Exploration",
"High Dimensional Data",
"Multivariate Data",
"Subspace Clustering",
"Subspace Analysis",
"Cluster Analysis"
],
"authors": [
{
"givenName": "Salman",
"surname": "Mahmood",
"fullName": "Salman Mahmood",
"affiliation": "Computer Science Department, Stony Brook University, New York, NY, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": "Computer Science Department, Stony Brook University, New York, NY, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2008/3268/0/3268a381",
"title": "Voyage Analysis Applied to Geovisual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a381/12OmNAle6lS",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2012/4752/0/06400490",
"title": "Visual pattern discovery using random projections",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400490/12OmNBh8gRI",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/02/07862917",
"title": "The Subspace Voyager: Exploring High-Dimensional Data along a Continuum of Salient 3D Subspaces",
"doi": null,
"abstractUrl": "/journal/tg/2018/02/07862917/13rRUwInvsY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07534792",
"title": "A Visual Analytics Approach for Categorical Joint Distribution Reconstruction from Marginal Projections",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07534792/13rRUxDIthg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09729550",
"title": "Visual Exploration of Relationships and Structure in Low-Dimensional Embeddings",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09729550/1Bya8LDahDa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903343",
"title": "RankAxis: Towards a Systematic Combination of Projection and Ranking in Multi-Attribute Data Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903343/1GZooOkjYzK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10045801",
"title": "Anchorage: Visual Analysis of Satisfaction in Customer Service Videos Via Anchor Events",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10045801/1KOqKyuerbW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412169",
"title": "Efficient Sentence Embedding via Semantic Subspace Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412169/1tmj5GaqmT6",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552909",
"title": "<italic>Where Can We Help</italic>? A Visual Analytics Approach to Diagnosing and Improving Semantic Segmentation of Movable Objects",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552909/1xibW2zLd9C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09645173",
"title": "<italic>GUCCI</italic> - Guided Cardiac Cohort Investigation of Blood Flow Data",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09645173/1zc6CvdsNMc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10068322",
"articleId": "1LtR6T3cY0w",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10066837",
"articleId": "1LtR7JYxVEk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LvvXdCkKAM",
"name": "ttg555501-010068257s1-supp1-3256376.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010068257s1-supp1-3256376.mp4",
"extension": "mp4",
"size": "61.5 MB",
"__typename": "WebExtraType"
},
{
"id": "1LvvXrKAt7W",
"name": "ttg555501-010068257s1-supp2-3256376.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010068257s1-supp2-3256376.pdf",
"extension": "pdf",
"size": "509 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LtR6T3cY0w",
"doi": "10.1109/TVCG.2023.3255820",
"abstract": "Learning the latent representation of three-dimensional (3D) morphable geometry is useful for several tasks, such as 3D face tracking, human motion analysis, and character generation and animation. For unstructured surface meshes, previous state-of-the-art methods focus on designing convolution operators and share the same pooling and unpooling operations to encode neighborhood information. Previous models use a mesh pooling operation based on edge contraction, which is based on the Euclidean distance of vertices rather than the actual topology. In this study, we investigated whether such a pooling operation can be improved, introducing an improved pooling layer that combines the vertex normals and adjacent faces area. Furthermore, to prevent template overfitting, we increased the receptive field and improved low-resolution projection in the unpooling stage. This increase did not affect processing efficiency because the operation was implemented once on the mesh. We performed experiments to evaluate the proposed method, whose results indicated that the proposed operations outperformed Neural3DMM with 14% lower reconstruction errors and outperformed CoMA by 15% by modifying the pooling and unpooling matrices.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Learning the latent representation of three-dimensional (3D) morphable geometry is useful for several tasks, such as 3D face tracking, human motion analysis, and character generation and animation. For unstructured surface meshes, previous state-of-the-art methods focus on designing convolution operators and share the same pooling and unpooling operations to encode neighborhood information. Previous models use a mesh pooling operation based on edge contraction, which is based on the Euclidean distance of vertices rather than the actual topology. In this study, we investigated whether such a pooling operation can be improved, introducing an improved pooling layer that combines the vertex normals and adjacent faces area. Furthermore, to prevent template overfitting, we increased the receptive field and improved low-resolution projection in the unpooling stage. This increase did not affect processing efficiency because the operation was implemented once on the mesh. We performed experiments to evaluate the proposed method, whose results indicated that the proposed operations outperformed Neural3DMM with 14% lower reconstruction errors and outperformed CoMA by 15% by modifying the pooling and unpooling matrices.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Learning the latent representation of three-dimensional (3D) morphable geometry is useful for several tasks, such as 3D face tracking, human motion analysis, and character generation and animation. For unstructured surface meshes, previous state-of-the-art methods focus on designing convolution operators and share the same pooling and unpooling operations to encode neighborhood information. Previous models use a mesh pooling operation based on edge contraction, which is based on the Euclidean distance of vertices rather than the actual topology. In this study, we investigated whether such a pooling operation can be improved, introducing an improved pooling layer that combines the vertex normals and adjacent faces area. Furthermore, to prevent template overfitting, we increased the receptive field and improved low-resolution projection in the unpooling stage. This increase did not affect processing efficiency because the operation was implemented once on the mesh. We performed experiments to evaluate the proposed method, whose results indicated that the proposed operations outperformed Neural3DMM with 14% lower reconstruction errors and outperformed CoMA by 15% by modifying the pooling and unpooling matrices.",
"title": "Efficient Pooling Operator for 3D Morphable Models",
"normalizedTitle": "Efficient Pooling Operator for 3D Morphable Models",
"fno": "10068322",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Convolution",
"Faces",
"Three Dimensional Displays",
"Shape",
"Task Analysis",
"Computational Modeling",
"Geometry",
"Latent Representation",
"Mesh Reconstruction",
"Morphable Model"
],
"authors": [
{
"givenName": "Haoliang",
"surname": "Zhang",
"fullName": "Haoliang Zhang",
"affiliation": "The University of Oklahoma, Norman, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Samuel",
"surname": "Cheng",
"fullName": "Samuel Cheng",
"affiliation": "The University of Oklahoma, Norman, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christian El",
"surname": "Amm",
"fullName": "Christian El Amm",
"affiliation": "The University of Oklahoma, Norman, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jonghoon",
"surname": "Kim",
"fullName": "Jonghoon Kim",
"affiliation": "Chungnam National University, Republic of Korea",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-9",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034a895",
"title": "3D Morphable Models as Spatial Transformer Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a895/12OmNyPQ4OA",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c626",
"title": "Recurrent Slice Networks for 3D Segmentation of Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c626/17D45Xq6dzm",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09870559",
"title": "P2T: Pyramid Pooling Transformer for Scene Understanding",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09870559/1GgcM53dti8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a513",
"title": "Adaptive Salience Preserving Pooling for Deep Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a513/1cJ0Ai6YpXi",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h212",
"title": "Neural 3D Morphable Models: Spiral Convolutional Networks for 3D Shape Representation Learning and Generation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h212/1hVlISVeBaw",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300d364",
"title": "Global Feature Guided Local Pooling",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300d364/1hVldsD7mpy",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150750",
"title": "Mesh Variational Autoencoders with Edge Contraction Pooling",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150750/1lPH7Uy2hqw",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i609",
"title": "DualConvMesh-Net: Joint Geodesic and Euclidean Convolutions on 3D Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i609/1m3nzTemaGs",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/02/09211767",
"title": "Learnable Pooling in Graph Convolutional Networks for Brain Surface Analysis",
"doi": null,
"abstractUrl": "/journal/tp/2022/02/09211767/1nB9SOc7u9i",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900n3159",
"title": "Learning Feature Aggregation for Deep 3D Morphable Models",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900n3159/1yeJkSsaWNa",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10066189",
"articleId": "1LoWA0TvtJK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10068257",
"articleId": "1LtR7CeyeHe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LxaWO7f96M",
"name": "ttg555501-010068322s1-supp1-3255820.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010068322s1-supp1-3255820.pdf",
"extension": "pdf",
"size": "1.04 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LoWA0TvtJK",
"doi": "10.1109/TVCG.2023.3254522",
"abstract": "Feedback on the material properties of a visual object is essential in enhancing the users' perceptual experience of the object when users control the object with touchless inputs. Focusing on the softness perception of the object, we examined how the effective distance of hand movements influenced the degree of the object's softness perceived by users. In the experiments, participants moved their right hand in front of a camera which tracked their hand position. A textured 2D or 3D object on display deformed depending on the participant's hand position. In addition to establishing a ratio of deformation magnitude to the distance of hand movements, we altered the effective distance of hand movement, within which the hand movement could deform the object. Participants rated the strength of perceived softness (Experiments 1 and 2) and other perceptual impressions (Experiment 3). A longer effective distance produced a softer impression of the 2D and 3D objects. The saturation speed of object deformation due to the effective distance was not a critical determinant. The effective distance also modulated other perceptual impressions than softness. The role of the effective distance of hand movements on perceptual impressions of objects under touchless control is discussed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Feedback on the material properties of a visual object is essential in enhancing the users' perceptual experience of the object when users control the object with touchless inputs. Focusing on the softness perception of the object, we examined how the effective distance of hand movements influenced the degree of the object's softness perceived by users. In the experiments, participants moved their right hand in front of a camera which tracked their hand position. A textured 2D or 3D object on display deformed depending on the participant's hand position. In addition to establishing a ratio of deformation magnitude to the distance of hand movements, we altered the effective distance of hand movement, within which the hand movement could deform the object. Participants rated the strength of perceived softness (Experiments 1 and 2) and other perceptual impressions (Experiment 3). A longer effective distance produced a softer impression of the 2D and 3D objects. The saturation speed of object deformation due to the effective distance was not a critical determinant. The effective distance also modulated other perceptual impressions than softness. The role of the effective distance of hand movements on perceptual impressions of objects under touchless control is discussed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Feedback on the material properties of a visual object is essential in enhancing the users' perceptual experience of the object when users control the object with touchless inputs. Focusing on the softness perception of the object, we examined how the effective distance of hand movements influenced the degree of the object's softness perceived by users. In the experiments, participants moved their right hand in front of a camera which tracked their hand position. A textured 2D or 3D object on display deformed depending on the participant's hand position. In addition to establishing a ratio of deformation magnitude to the distance of hand movements, we altered the effective distance of hand movement, within which the hand movement could deform the object. Participants rated the strength of perceived softness (Experiments 1 and 2) and other perceptual impressions (Experiment 3). A longer effective distance produced a softer impression of the 2D and 3D objects. The saturation speed of object deformation due to the effective distance was not a critical determinant. The effective distance also modulated other perceptual impressions than softness. The role of the effective distance of hand movements on perceptual impressions of objects under touchless control is discussed.",
"title": "Softness Perception of Visual Objects Controlled by Touchless Inputs: The Role of Effective Distance of Hand Movements",
"normalizedTitle": "Softness Perception of Visual Objects Controlled by Touchless Inputs: The Role of Effective Distance of Hand Movements",
"fno": "10066189",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Deformation",
"Visualization",
"Cameras",
"Material Properties",
"Springs",
"Monitoring",
"Fabrics",
"Material Perception",
"Pseudo Haptics",
"Touchless Inputs",
"Softness"
],
"authors": [
{
"givenName": "Takahiro",
"surname": "Kawabe",
"fullName": "Takahiro Kawabe",
"affiliation": "NTT Communication Science Laboratories, Nippon Telegraph, Telephone Corporation, Atsugi, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yusuke",
"surname": "Ujitoko",
"fullName": "Yusuke Ujitoko",
"affiliation": "NTT Communication Science Laboratories, Nippon Telegraph, Telephone Corporation, Atsugi, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2003/2105/5/210550046",
"title": "GWINDOWS: Towards Robust Perception-Based UI",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2003/210550046/12OmNAHmOwb",
"parentPublication": {
"id": "proceedings/cvprw/2003/2105/5",
"title": "2003 Conference on Computer Vision and Pattern Recognition Workshop - Volume 5",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643561",
"title": "Effects of a retroreflective screen on depth perception in a head-mounted projection display",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643561/12OmNB9bvby",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811048",
"title": "Subjective Perception and Objective Measurements in Perceiving Object Softness for VR Surgical Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811048/12OmNwtWfHK",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165660",
"title": "<italic>SoftAR</italic>: Visually Manipulating Haptic Softness Perception in Spatial Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165660/13rRUIIVlcN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/02/07042750",
"title": "Design and Characterization of a Fabric-Based Softness Display",
"doi": null,
"abstractUrl": "/journal/th/2015/02/07042750/13rRUwI5TRa",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/03/07556272",
"title": "Vibrotactile Compliance Feedback for Tangential Force Interaction",
"doi": null,
"abstractUrl": "/journal/th/2017/03/07556272/13rRUwcS1D9",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2013/04/tth2013040464",
"title": "Identifying Haptic Exploratory Procedures by Analyzing Hand Dynamics and Contact Force",
"doi": null,
"abstractUrl": "/journal/th/2013/04/tth2013040464/13rRUxBJhmY",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/04/07728146",
"title": "Visual Vibrometry: Estimating Material Properties from Small Motions in Video",
"doi": null,
"abstractUrl": "/journal/tp/2017/04/07728146/13rRUxYrbNC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/08/08675400",
"title": "Recognizing Material Properties from Images",
"doi": null,
"abstractUrl": "/journal/tp/2020/08/08675400/18K0dyftG5q",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09117062",
"title": "Augmenting Perceived Softness of Haptic Proxy Objects Through Transient Vibration and Visuo-Haptic Illusion in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09117062/1kGg69DDrFe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10066194",
"articleId": "1LoWzFuMlMc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10068322",
"articleId": "1LtR6T3cY0w",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LtR7psjMFa",
"name": "ttg555501-010066189s1-supp1-3254522.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010066189s1-supp1-3254522.mp4",
"extension": "mp4",
"size": "32.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LoWzFuMlMc",
"doi": "10.1109/TVCG.2023.3255207",
"abstract": "We propose a robust and automatic method to construct manifold cages for 3D triangular meshes. The cage contains hundreds of triangles to tightly enclose the input mesh without self-intersections. To generate such cages, our algorithm consists of two phases: (1) construct manifold cages satisfying the tightness, enclosing, and intersection-free requirements and (2) reduce mesh complexities and approximation errors without violating the enclosing and intersection-free requirements. To theoretically make the first stage have those properties, we combine the conformal tetrahedral meshing and tetrahedral mesh subdivision. The second step is a constrained remeshing process using explicit checks to ensure that the enclosing and intersection-free constraints are always satisfied. Both phases use a hybrid coordinate representation, i.e., rational numbers and floating point numbers, combined with exact arithmetic and floating point filtering techniques to guarantee the robustness of geometric predicates with a favorable speed. We extensively test our method on a data set of over 8500 models, demonstrating robustness and performance. Compared to other state-of-the-art methods, our method possesses much stronger robustness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a robust and automatic method to construct manifold cages for 3D triangular meshes. The cage contains hundreds of triangles to tightly enclose the input mesh without self-intersections. To generate such cages, our algorithm consists of two phases: (1) construct manifold cages satisfying the tightness, enclosing, and intersection-free requirements and (2) reduce mesh complexities and approximation errors without violating the enclosing and intersection-free requirements. To theoretically make the first stage have those properties, we combine the conformal tetrahedral meshing and tetrahedral mesh subdivision. The second step is a constrained remeshing process using explicit checks to ensure that the enclosing and intersection-free constraints are always satisfied. Both phases use a hybrid coordinate representation, i.e., rational numbers and floating point numbers, combined with exact arithmetic and floating point filtering techniques to guarantee the robustness of geometric predicates with a favorable speed. We extensively test our method on a data set of over 8500 models, demonstrating robustness and performance. Compared to other state-of-the-art methods, our method possesses much stronger robustness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a robust and automatic method to construct manifold cages for 3D triangular meshes. The cage contains hundreds of triangles to tightly enclose the input mesh without self-intersections. To generate such cages, our algorithm consists of two phases: (1) construct manifold cages satisfying the tightness, enclosing, and intersection-free requirements and (2) reduce mesh complexities and approximation errors without violating the enclosing and intersection-free requirements. To theoretically make the first stage have those properties, we combine the conformal tetrahedral meshing and tetrahedral mesh subdivision. The second step is a constrained remeshing process using explicit checks to ensure that the enclosing and intersection-free constraints are always satisfied. Both phases use a hybrid coordinate representation, i.e., rational numbers and floating point numbers, combined with exact arithmetic and floating point filtering techniques to guarantee the robustness of geometric predicates with a favorable speed. We extensively test our method on a data set of over 8500 models, demonstrating robustness and performance. Compared to other state-of-the-art methods, our method possesses much stronger robustness.",
"title": "Robust Coarse Cage Construction with Small Approximation Errors",
"normalizedTitle": "Robust Coarse Cage Construction with Small Approximation Errors",
"fno": "10066194",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Complexity Theory",
"Robustness",
"Manifolds",
"Approximation Error",
"Approximation Algorithms",
"Filtering",
"Geometry",
"Cage Construction",
"Conformal Tetrahedral Meshing",
"Tetrahedral Subdivision",
"Approximation Error",
"Mesh Complexity"
],
"authors": [
{
"givenName": "Jia-Peng",
"surname": "Guo",
"fullName": "Jia-Peng Guo",
"affiliation": "School of Data Sciences, University of Science and Technology of China, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wen-Xiang",
"surname": "Zhang",
"fullName": "Wen-Xiang Zhang",
"affiliation": "School of Mathematical Sciences, University of Science and Technology of China, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chunyang",
"surname": "Ye",
"fullName": "Chunyang Ye",
"affiliation": "School of Mathematical Sciences, University of Science and Technology of China, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiao-Ming",
"surname": "Fu",
"fullName": "Xiao-Ming Fu",
"affiliation": "School of Mathematical Sciences, University of Science and Technology of China, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgra/2003/2032/0/01240987",
"title": "Moving least squares multiresolution surface approximation",
"doi": null,
"abstractUrl": "/proceedings-article/sibgra/2003/01240987/12OmNqBbHBh",
"parentPublication": {
"id": "proceedings/sibgra/2003/2032/0",
"title": "16th Brazilian Symposium on Computer Graphics and Image Processing (SIBGRAPI 2003)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1997/8262/0/82620387",
"title": "The multilevel finite element method for adaptive mesh optimization and visualization of volume data",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620387/12OmNrAMEQ3",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150901",
"title": "Optimal local spline approximation of planar shape",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150901/12OmNvAAtwS",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2008/3554/0/04775699",
"title": "Triangular Mesh Geometry Coding with Multiresolution Decomposition Based on Structuring of Surrounding Vertices",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2008/04775699/12OmNviHKdA",
"parentPublication": {
"id": "proceedings/isspit/2008/3554/0",
"title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. ISSPIT 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2003/2030/0/01250399",
"title": "Visualization of volume data with quadratic super splines",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/01250399/12OmNvkpl5I",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2007/2909/3/290930216",
"title": "Offset Approximation Algorithm for Subdivision Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2007/290930216/12OmNwvVrNs",
"parentPublication": {
"id": "proceedings/snpd/2007/2909/3",
"title": "Eighth ACIS International Conference on Software Engineering, Artificial Intelligence, Networking, and Parallel/Distributed Computing (SNPD 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/12/07756294",
"title": "Error-Bounded and Feature Preserving Surface Remeshing with Minimal Angle Improvement",
"doi": null,
"abstractUrl": "/journal/tg/2017/12/07756294/13rRUxlgy3O",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/focs/2022/2055/0/205500a237",
"title": "Applications of Random Algebraic Constructions to Hardness of Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/focs/2022/205500a237/1BtfyMBjvEs",
"parentPublication": {
"id": "proceedings/focs/2022/2055/0",
"title": "2021 IEEE 62nd Annual Symposium on Foundations of Computer Science (FOCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09767783",
"title": "b/Surf: Interactive Bézier Splines on Surface Meshes",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09767783/1D4MIotOemQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2003/2032/0/01240987",
"title": "Moving least squares multiresolution surface approximation",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2003/01240987/1h0F2U06q3e",
"parentPublication": {
"id": "proceedings/sibgrapi/2003/2032/0",
"title": "16th Brazilian Symposium on Computer Graphics and Image Processing (SIBGRAPI 2003)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10061279",
"articleId": "1LiKMy3pdDO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10066189",
"articleId": "1LoWA0TvtJK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LtR6HfIFOg",
"name": "ttg555501-010066194s1-supp2-3255207.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010066194s1-supp2-3255207.pdf",
"extension": "pdf",
"size": "275 kB",
"__typename": "WebExtraType"
},
{
"id": "1LtR6zVgSyY",
"name": "ttg555501-010066194s1-supp1-3255207.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010066194s1-supp1-3255207.mp4",
"extension": "mp4",
"size": "33.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LiKMy3pdDO",
"doi": "10.1109/TVCG.2023.3253184",
"abstract": "Large-scale datasets and deep generative models have enabled impressive progress in human face reenactment. Existing solutions for face reenactment have focused on processing real face images through facial landmarks by generative models. Different from real human faces, artistic human faces (e.g., those in paintings, cartoons, etc.) often involve exaggerated shapes and various textures. Therefore, directly applying existing solutions to artistic faces often fails to preserve the characteristics of the original artistic faces (e.g., face identity and decorative lines along face contours) due to the domain gap between real and artistic faces. To address these issues, we present <italic>ReenactArtFace</italic>, the first effective solution for transferring the poses and expressions from human videos to various artistic face images. We achieve artistic face reenactment in a coarse-to-fine manner. First, we perform <italic>3D artistic face reconstruction</italic>, which reconstructs a textured 3D artistic face through a 3D morphable model (3DMM) and a 2D parsing map from an input artistic image. The 3DMM can not only rig the expressions better than facial landmarks but also render images under different poses/expressions as coarse reenactment results robustly. However, these coarse results suffer from self-occlusions and lack contour lines. Second, we thus perform <italic>artistic face refinement</italic> by using a personalized conditional adversarial generative model (cGAN) fine-tuned on the input artistic image and the coarse reenactment results. For high-quality refinement, we propose a contour loss to supervise the cGAN to faithfully synthesize contour lines. Quantitative and qualitative experiments demonstrate that our method achieves better results than the existing solutions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Large-scale datasets and deep generative models have enabled impressive progress in human face reenactment. Existing solutions for face reenactment have focused on processing real face images through facial landmarks by generative models. Different from real human faces, artistic human faces (e.g., those in paintings, cartoons, etc.) often involve exaggerated shapes and various textures. Therefore, directly applying existing solutions to artistic faces often fails to preserve the characteristics of the original artistic faces (e.g., face identity and decorative lines along face contours) due to the domain gap between real and artistic faces. To address these issues, we present <italic>ReenactArtFace</italic>, the first effective solution for transferring the poses and expressions from human videos to various artistic face images. We achieve artistic face reenactment in a coarse-to-fine manner. First, we perform <italic>3D artistic face reconstruction</italic>, which reconstructs a textured 3D artistic face through a 3D morphable model (3DMM) and a 2D parsing map from an input artistic image. The 3DMM can not only rig the expressions better than facial landmarks but also render images under different poses/expressions as coarse reenactment results robustly. However, these coarse results suffer from self-occlusions and lack contour lines. Second, we thus perform <italic>artistic face refinement</italic> by using a personalized conditional adversarial generative model (cGAN) fine-tuned on the input artistic image and the coarse reenactment results. For high-quality refinement, we propose a contour loss to supervise the cGAN to faithfully synthesize contour lines. Quantitative and qualitative experiments demonstrate that our method achieves better results than the existing solutions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Large-scale datasets and deep generative models have enabled impressive progress in human face reenactment. Existing solutions for face reenactment have focused on processing real face images through facial landmarks by generative models. Different from real human faces, artistic human faces (e.g., those in paintings, cartoons, etc.) often involve exaggerated shapes and various textures. Therefore, directly applying existing solutions to artistic faces often fails to preserve the characteristics of the original artistic faces (e.g., face identity and decorative lines along face contours) due to the domain gap between real and artistic faces. To address these issues, we present ReenactArtFace, the first effective solution for transferring the poses and expressions from human videos to various artistic face images. We achieve artistic face reenactment in a coarse-to-fine manner. First, we perform 3D artistic face reconstruction, which reconstructs a textured 3D artistic face through a 3D morphable model (3DMM) and a 2D parsing map from an input artistic image. The 3DMM can not only rig the expressions better than facial landmarks but also render images under different poses/expressions as coarse reenactment results robustly. However, these coarse results suffer from self-occlusions and lack contour lines. Second, we thus perform artistic face refinement by using a personalized conditional adversarial generative model (cGAN) fine-tuned on the input artistic image and the coarse reenactment results. For high-quality refinement, we propose a contour loss to supervise the cGAN to faithfully synthesize contour lines. Quantitative and qualitative experiments demonstrate that our method achieves better results than the existing solutions.",
"title": "ReenactArtFace: Artistic Face Image Reenactment",
"normalizedTitle": "ReenactArtFace: Artistic Face Image Reenactment",
"fno": "10061279",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Faces",
"Three Dimensional Displays",
"Image Reconstruction",
"Videos",
"Geometry",
"Generators",
"Fitting",
"3 DMM",
"Artistic Faces",
"Face Reenactment",
"Generative Models"
],
"authors": [
{
"givenName": "Linzi",
"surname": "Qu",
"fullName": "Linzi Qu",
"affiliation": "School of Creative Media, City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiaxiang",
"surname": "Shang",
"fullName": "Jiaxiang Shang",
"affiliation": "Department of Computer Science & Engineering, HKUST, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaoguang",
"surname": "Han",
"fullName": "Xiaoguang Han",
"affiliation": "Shenzhen Research Institute of Big Data, Chinese University of Hong Kong, Shenzhen, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongbo",
"surname": "Fu",
"fullName": "Hongbo Fu",
"affiliation": "School of Creative Media, City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2014/5118/0/06909933",
"title": "Automatic Face Reenactment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/06909933/12OmNAq3hMr",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09763438",
"title": "FSGANv2: Improved Subject Agnostic Face Swapping and Reenactment",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09763438/1CT4UvMc0pO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h183",
"title": "FSGAN: Subject Agnostic Face Swapping and Reenactment",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h183/1hQqq3rkN0c",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093474",
"title": "ICface: Interpretable and Controllable Face Reenactment Using GANs",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093474/1jPbcapGK7m",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a786",
"title": "Generative Video Face Reenactment by AUs and Gaze Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a786/1kecIXW5F9m",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f325",
"title": "FReeNet: Multi-Identity Face Reenactment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f325/1m3neHAcbKM",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h082",
"title": "Learning Identity-Invariant Motion Representations for Cross-ID Face Reenactment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h082/1m3nhQ6ft9S",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428233",
"title": "Li-Net: Large-Pose Identity-Preserving Face Reenactment Network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428233/1uilwRqglYA",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700b328",
"title": "FACEGAN: Facial Attribute Controllable rEenactment GAN",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700b328/1uqGqHtd4CQ",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c236",
"title": "Pareidolia Face Reenactment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c236/1yeIeqChdvO",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10058545",
"articleId": "1LdkjmPPahy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10066194",
"articleId": "1LoWzFuMlMc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LdkjmPPahy",
"doi": "10.1109/TVCG.2023.3251950",
"abstract": "Machine learning models have gained traction as decision support tools for tasks that require processing copious amounts of data. However, to achieve the primary benefits of automating this part of decision-making, people must be able to trust the machine learning model's outputs. In order to enhance people's trust and promote appropriate reliance on the model, visualization techniques such as interactive model steering, performance analysis, model comparison, and uncertainty visualization have been proposed. In this study, we tested the effects of two uncertainty visualization techniques in a college admissions forecasting task, under two task difficulty levels, using Amazon's Mechanical Turk platform. Results show that (1) people's reliance on the model depends on the task difficulty and level of machine uncertainty and (2) ordinal forms of expressing model uncertainty are more likely to calibrate model usage behavior. These outcomes emphasize that reliance on decision support tools can depend on the cognitive accessibility of the visualization technique and perceptions of model performance and task difficulty.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Machine learning models have gained traction as decision support tools for tasks that require processing copious amounts of data. However, to achieve the primary benefits of automating this part of decision-making, people must be able to trust the machine learning model's outputs. In order to enhance people's trust and promote appropriate reliance on the model, visualization techniques such as interactive model steering, performance analysis, model comparison, and uncertainty visualization have been proposed. In this study, we tested the effects of two uncertainty visualization techniques in a college admissions forecasting task, under two task difficulty levels, using Amazon's Mechanical Turk platform. Results show that (1) people's reliance on the model depends on the task difficulty and level of machine uncertainty and (2) ordinal forms of expressing model uncertainty are more likely to calibrate model usage behavior. These outcomes emphasize that reliance on decision support tools can depend on the cognitive accessibility of the visualization technique and perceptions of model performance and task difficulty.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Machine learning models have gained traction as decision support tools for tasks that require processing copious amounts of data. However, to achieve the primary benefits of automating this part of decision-making, people must be able to trust the machine learning model's outputs. In order to enhance people's trust and promote appropriate reliance on the model, visualization techniques such as interactive model steering, performance analysis, model comparison, and uncertainty visualization have been proposed. In this study, we tested the effects of two uncertainty visualization techniques in a college admissions forecasting task, under two task difficulty levels, using Amazon's Mechanical Turk platform. Results show that (1) people's reliance on the model depends on the task difficulty and level of machine uncertainty and (2) ordinal forms of expressing model uncertainty are more likely to calibrate model usage behavior. These outcomes emphasize that reliance on decision support tools can depend on the cognitive accessibility of the visualization technique and perceptions of model performance and task difficulty.",
"title": "Evaluating the Impact of Uncertainty Visualization on Model Reliance",
"normalizedTitle": "Evaluating the Impact of Uncertainty Visualization on Model Reliance",
"fno": "10058545",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Uncertainty",
"Predictive Models",
"Task Analysis",
"Data Visualization",
"Data Models",
"Computational Modeling",
"Prediction Algorithms",
"Uncertainty",
"Model Reliance",
"Trust",
"Human Machine Collaborations"
],
"authors": [
{
"givenName": "Jieqiong",
"surname": "Zhao",
"fullName": "Jieqiong Zhao",
"affiliation": "Arizona State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yixuan",
"surname": "Wang",
"fullName": "Yixuan Wang",
"affiliation": "Arizona State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michelle V.",
"surname": "Mancenido",
"fullName": "Michelle V. Mancenido",
"affiliation": "Arizona State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Erin K.",
"surname": "Chiou",
"fullName": "Erin K. Chiou",
"affiliation": "Arizona State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ross",
"surname": "Maciejewski",
"fullName": "Ross Maciejewski",
"affiliation": "Arizona State University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2017/09/07563342",
"title": "Uncertainty Visualization by Representative Sampling from Prediction Ensembles",
"doi": null,
"abstractUrl": "/journal/tg/2017/09/07563342/13rRUIM2VH4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/01/mcg2013010075",
"title": "Visualization of Uncertainty without a Mean",
"doi": null,
"abstractUrl": "/magazine/cg/2013/01/mcg2013010075/13rRUwcAquB",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08457476",
"title": "In Pursuit of Error: A Survey of Uncertainty Visualization Evaluation",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08457476/17D45WaTkcP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903471",
"title": "Fiber Uncertainty Visualization for Bivariate Data With Parametric and Nonparametric Noise Models",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903471/1GZolxWTqPS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904442",
"title": "Communicating Uncertainty in Digital Humanities Visualization Research",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904442/1H1gpt871W8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2022/4609/0/460900a626",
"title": "Unsupervised DeepView: Global Uncertainty Visualization for High Dimensional Data",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2022/460900a626/1KBr5pVl2qA",
"parentPublication": {
"id": "proceedings/icdmw/2022/4609/0",
"title": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a109",
"title": "Visualization of Machine Learning Uncertainty in AR-Based See-Through Applications",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a109/1KmFcUFPF3G",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2018/6873/0/08739182",
"title": "Visual Analysis of Simulation Uncertainty Using Cost-Effective Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2018/08739182/1b1xbHTEtag",
"parentPublication": {
"id": "proceedings/ldav/2018/6873/0",
"title": "2018 IEEE 8th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805422",
"title": "Why Authors Don't Visualize Uncertainty",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805422/1cG4ylx5qbC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis4dh/2021/1370/0/137000a012",
"title": "Uncertainty-aware Topic Modeling Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vis4dh/2021/137000a012/1yNiG9yU9JS",
"parentPublication": {
"id": "proceedings/vis4dh/2021/1370/0",
"title": "2021 IEEE 6th Workshop on Visualization for the Digital Humanities (VIS4DH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10058042",
"articleId": "1LbFn8YmYjC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10061279",
"articleId": "1LiKMy3pdDO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LxaWjANsUE",
"name": "ttg555501-010058545s1-supp3-3251950.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010058545s1-supp3-3251950.pdf",
"extension": "pdf",
"size": "2.48 MB",
"__typename": "WebExtraType"
},
{
"id": "1LxaSw96gEM",
"name": "ttg555501-010058545s1-supp2-3251950.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010058545s1-supp2-3251950.pdf",
"extension": "pdf",
"size": "1.44 MB",
"__typename": "WebExtraType"
},
{
"id": "1LxaWBl0H4I",
"name": "ttg555501-010058545s1-supp1-3251950.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010058545s1-supp1-3251950.pdf",
"extension": "pdf",
"size": "103 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LbFmZlZK24",
"doi": "10.1109/TVCG.2023.3251188",
"abstract": "How we perceive and experience the world around us is inherently multisensory. Most of the Virtual Reality (VR) literature is based on the senses of sight and hearing. However, there is a lot of potential for integrating additional stimuli into Virtual Environments (VEs), especially in a training context. Identifying the relevant stimuli for obtaining a virtual experience that is perceptually equivalent to a real experience will lead users to behave the same across environments, which adds substantial value for several training areas, such as firefighters. In this paper, we present an experiment aiming to assess the impact of different sensory stimuli on stress, fatigue, cybersickness, Presence and knowledge transfer of users during a firefighter training VE. The results suggested that the stimulus that significantly impacted the user's response was wearing a firefighter's uniform and combining all sensory stimuli under study: heat, weight, uniform, and mask. The results also showed that the VE did not induce cybersickness and that it was successful in the task of transferring knowledge.",
"abstracts": [
{
"abstractType": "Regular",
"content": "How we perceive and experience the world around us is inherently multisensory. Most of the Virtual Reality (VR) literature is based on the senses of sight and hearing. However, there is a lot of potential for integrating additional stimuli into Virtual Environments (VEs), especially in a training context. Identifying the relevant stimuli for obtaining a virtual experience that is perceptually equivalent to a real experience will lead users to behave the same across environments, which adds substantial value for several training areas, such as firefighters. In this paper, we present an experiment aiming to assess the impact of different sensory stimuli on stress, fatigue, cybersickness, Presence and knowledge transfer of users during a firefighter training VE. The results suggested that the stimulus that significantly impacted the user's response was wearing a firefighter's uniform and combining all sensory stimuli under study: heat, weight, uniform, and mask. The results also showed that the VE did not induce cybersickness and that it was successful in the task of transferring knowledge.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "How we perceive and experience the world around us is inherently multisensory. Most of the Virtual Reality (VR) literature is based on the senses of sight and hearing. However, there is a lot of potential for integrating additional stimuli into Virtual Environments (VEs), especially in a training context. Identifying the relevant stimuli for obtaining a virtual experience that is perceptually equivalent to a real experience will lead users to behave the same across environments, which adds substantial value for several training areas, such as firefighters. In this paper, we present an experiment aiming to assess the impact of different sensory stimuli on stress, fatigue, cybersickness, Presence and knowledge transfer of users during a firefighter training VE. The results suggested that the stimulus that significantly impacted the user's response was wearing a firefighter's uniform and combining all sensory stimuli under study: heat, weight, uniform, and mask. The results also showed that the VE did not induce cybersickness and that it was successful in the task of transferring knowledge.",
"title": "Studying the Influence of Multisensory Stimuli on a Firefighting Training Virtual Environment",
"normalizedTitle": "Studying the Influence of Multisensory Stimuli on a Firefighting Training Virtual Environment",
"fno": "10057483",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Training",
"Human Factors",
"Heart Rate Variability",
"Visualization",
"Virtual Environments",
"Market Research",
"Cybersickness",
"Computer Graphics",
"Virtual Reality",
"Professional Training",
"Biofeedback"
],
"authors": [
{
"givenName": "David",
"surname": "Narciso",
"fullName": "David Narciso",
"affiliation": "University of Trás-os-Montes e Alto Douro (UTAD). Quinta de Prados, Vila Real, Portugal",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Miguel",
"surname": "Melo",
"fullName": "Miguel Melo",
"affiliation": "Institute for Systems and Computer Engineering, Technology and Science (INESC TEC). Rua Dr. Roberto Frias, Porto, Portugal",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Susana",
"surname": "Rodrigues",
"fullName": "Susana Rodrigues",
"affiliation": "Institute for Systems and Computer Engineering, Technology and Science (INESC TEC). Rua Dr. Roberto Frias, Porto, Portugal",
"__typename": "ArticleAuthorType"
},
{
"givenName": "João Paulo",
"surname": "Cunha",
"fullName": "João Paulo Cunha",
"affiliation": "Institute for Systems and Computer Engineering, Technology and Science (INESC TEC). Rua Dr. Roberto Frias, Porto, Portugal",
"__typename": "ArticleAuthorType"
},
{
"givenName": "José",
"surname": "Vasconcelos-Raposo",
"fullName": "José Vasconcelos-Raposo",
"affiliation": "University of Trás-os-Montes e Alto Douro (UTAD). Quinta de Prados, Vila Real, Portugal",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maximino",
"surname": "Bessa",
"fullName": "Maximino Bessa",
"affiliation": "University of Trás-os-Montes e Alto Douro (UTAD). Quinta de Prados, Vila Real, Portugal",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/ta/2020/03/08301612",
"title": "Classifying Affective Haptic Stimuli through Gender-Specific Heart Rate Variability Nonlinear Analysis",
"doi": null,
"abstractUrl": "/journal/ta/2020/03/08301612/13rRUy0qnC5",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714043",
"title": "Studying the Effects of Congruence of Auditory and Visual Stimuli on Virtual Reality Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714043/1B0Y2dBeUi4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09729540",
"title": "Using Heart Rate Variability for Comparing the Effectiveness of Virtual vs Real Training Environments for Firefighters",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09729540/1Bya8YD1tUk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a794",
"title": "An Investigation on the Relationship between Cybersickness and Heart Rate Variability When Navigating a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a794/1J7We4du3FC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089513",
"title": "Comparative Evaluation of the Effects of Motion Control on Cybersickness in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089513/1jIx7SE9LiM",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089551",
"title": "A Structural Equation Modeling Approach to Understand the Relationship between Control, Cybersickness and Presence in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089551/1jIx95ncylO",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09143472",
"title": "Do Multisensory Stimuli Benefit the Virtual Reality Experience? A Systematic Review",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09143472/1lxmwwX05lC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2020/9574/0/957400a813",
"title": "Impact of Different Stimuli on User Stress During a Virtual Firefighting Training Exercise",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2020/957400a813/1pBMqLNShEc",
"parentPublication": {
"id": "proceedings/bibe/2020/9574/0",
"title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09386008",
"title": "Floor-vibration VR: Mitigating Cybersickness Using Whole-body Tactile Stimuli in Highly Realistic Vehicle Driving Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09386008/1seiz94oUco",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a178",
"title": "Redirected Tilting: Eliciting Postural Changes with a Rotational Self-Motion Illusion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a178/1tnWQaG0jyo",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10057127",
"articleId": "1La0wW0rjEs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10057994",
"articleId": "1LbFmG2HHnW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LbFmG2HHnW",
"doi": "10.1109/TVCG.2023.3251344",
"abstract": "Dashboards, which comprise multiple views on a single display, help analyze and communicate multiple perspectives of data simultaneously. However, creating effective and elegant dashboards is challenging since it requires careful and logical arrangement and coordination of multiple visualizations. To solve the problem, we propose a data-driven approach for mining design rules from dashboards and automating dashboard organization. Specifically, we focus on two prominent aspects of the organization: <italic>arrangement</italic>, which describes the position, size, and layout of each view in the display space; and <italic>coordination</italic>, which indicates the interaction between pairwise views. We build a new dataset containing 854 dashboards crawled online, and develop feature engineering methods for describing the single views and view-wise relationships in terms of data, encoding, layout, and interactions. Further, we identify design rules among those features and develop a recommender for dashboard design. We demonstrate the usefulness of DMiner through an expert study and a user study. The expert study shows that our extracted design rules are reasonable and conform to the design practice of experts. Moreover, a comparative user study shows that our recommender could help automate dashboard organization and reach human-level performance. In summary, our work offers a promising starting point for design mining visualizations to build recommenders.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dashboards, which comprise multiple views on a single display, help analyze and communicate multiple perspectives of data simultaneously. However, creating effective and elegant dashboards is challenging since it requires careful and logical arrangement and coordination of multiple visualizations. To solve the problem, we propose a data-driven approach for mining design rules from dashboards and automating dashboard organization. Specifically, we focus on two prominent aspects of the organization: <italic>arrangement</italic>, which describes the position, size, and layout of each view in the display space; and <italic>coordination</italic>, which indicates the interaction between pairwise views. We build a new dataset containing 854 dashboards crawled online, and develop feature engineering methods for describing the single views and view-wise relationships in terms of data, encoding, layout, and interactions. Further, we identify design rules among those features and develop a recommender for dashboard design. We demonstrate the usefulness of DMiner through an expert study and a user study. The expert study shows that our extracted design rules are reasonable and conform to the design practice of experts. Moreover, a comparative user study shows that our recommender could help automate dashboard organization and reach human-level performance. In summary, our work offers a promising starting point for design mining visualizations to build recommenders.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dashboards, which comprise multiple views on a single display, help analyze and communicate multiple perspectives of data simultaneously. However, creating effective and elegant dashboards is challenging since it requires careful and logical arrangement and coordination of multiple visualizations. To solve the problem, we propose a data-driven approach for mining design rules from dashboards and automating dashboard organization. Specifically, we focus on two prominent aspects of the organization: arrangement, which describes the position, size, and layout of each view in the display space; and coordination, which indicates the interaction between pairwise views. We build a new dataset containing 854 dashboards crawled online, and develop feature engineering methods for describing the single views and view-wise relationships in terms of data, encoding, layout, and interactions. Further, we identify design rules among those features and develop a recommender for dashboard design. We demonstrate the usefulness of DMiner through an expert study and a user study. The expert study shows that our extracted design rules are reasonable and conform to the design practice of experts. Moreover, a comparative user study shows that our recommender could help automate dashboard organization and reach human-level performance. In summary, our work offers a promising starting point for design mining visualizations to build recommenders.",
"title": "Dashboard Design Mining and Recommendation",
"normalizedTitle": "Dashboard Design Mining and Recommendation",
"fno": "10057994",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Layout",
"Encoding",
"Feature Extraction",
"Data Mining",
"Visualization",
"Software Development Management",
"Design Mining",
"Visualization Recommendation",
"Multiple View Visualization",
"Dashboards"
],
"authors": [
{
"givenName": "Yanna",
"surname": "Lin",
"fullName": "Yanna Lin",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haotian",
"surname": "Li",
"fullName": "Haotian Li",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aoyu",
"surname": "Wu",
"fullName": "Aoyu Wu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong",
"surname": "Wang",
"fullName": "Yong Wang",
"affiliation": "Singapore Management University, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2016/5670/0/5670d483",
"title": "Insights from the Design and Evaluation of a Personal Health Dashboard",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670d483/12OmNAnMuCE",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08443395",
"title": "What Do We Talk About When We Talk About Dashboards?",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08443395/17D45XDIXWb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08283817",
"title": "Using Dashboard Networks to Visualize Multiple Patient Histories: A Design Study on Post-Operative Prostate Cancer",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08283817/17D45XacGi3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903550",
"title": "Dashboard Design Patterns",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903550/1GZolSVvsPu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904866",
"title": "Visualization Design Practices in a Crisis: Behind the Scenes with COVID-19 Dashboard Creators",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904866/1H2llxba9ws",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09906971",
"title": "DashBot: Insight-Driven Dashboard Generation Based on Deep Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09906971/1H5EWMQX9ZK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09911200",
"title": "MEDLEY: Intent-based Recommendations to Support Dashboard Composition<sc/>",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09911200/1Hcjm0PMkgw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/09/09035622",
"title": "LADV: Deep Learning Assisted Authoring of Dashboard Visualizations From Images and Sketches",
"doi": null,
"abstractUrl": "/journal/tg/2021/09/09035622/1iaeAO11H6o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552449",
"title": "MultiVision: Designing Analytical Dashboards with Deep Learning Based Recommendation",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552449/1xic65iQBoY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2023/01/09656613",
"title": "Finding Their Data Voice: Practices and Challenges of Dashboard Users",
"doi": null,
"abstractUrl": "/magazine/cg/2023/01/09656613/1zumu8nC20U",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10057010",
"articleId": "1La0xnHKuAM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10057483",
"articleId": "1LbFmZlZK24",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LdkjxiZML6",
"name": "ttg555501-010057994s1-tvcg-3251344-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010057994s1-tvcg-3251344-mm.zip",
"extension": "zip",
"size": "6.27 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LbFn8YmYjC",
"doi": "10.1109/TVCG.2023.3251648",
"abstract": "With the recent rise of Metaverse, online multiplayer VR applications are becoming increasingly prevalent worldwide. However, as multiple users are located in different physical environments, different reset frequencies and timings can lead to serious fairness issues for online collaborative/competitive VR applications. For the fairness of online VR apps/games, an ideal online RDW strategy must make the locomotion opportunities of different users equal, regardless of different physical environment layouts. The existing RDW methods lack the scheme to coordinate multiple users in different PEs, and thus have the issue of triggering too many resets for all the users under the locomotion fairness constraint. We propose a novel multi-user RDW method that is able to significantly reduce the overall reset number and give users a better immersive experience by providing a fair exploration. Our key idea is to first find out the “bottleneck” user that may cause all users to be reset and estimate the time to reset given the users' next targets, and then redirect all the users to favorable poses during that maximized bottleneck time to ensure the subsequent resets can be postponed as much as possible. More particularly, we develop methods to estimate the time of possibly encountering obstacles and the reachable area for a specific pose to enable the prediction of the next reset caused by any user. Our experiments and user study found that our method outperforms existing RDW methods in online VR applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the recent rise of Metaverse, online multiplayer VR applications are becoming increasingly prevalent worldwide. However, as multiple users are located in different physical environments, different reset frequencies and timings can lead to serious fairness issues for online collaborative/competitive VR applications. For the fairness of online VR apps/games, an ideal online RDW strategy must make the locomotion opportunities of different users equal, regardless of different physical environment layouts. The existing RDW methods lack the scheme to coordinate multiple users in different PEs, and thus have the issue of triggering too many resets for all the users under the locomotion fairness constraint. We propose a novel multi-user RDW method that is able to significantly reduce the overall reset number and give users a better immersive experience by providing a fair exploration. Our key idea is to first find out the “bottleneck” user that may cause all users to be reset and estimate the time to reset given the users' next targets, and then redirect all the users to favorable poses during that maximized bottleneck time to ensure the subsequent resets can be postponed as much as possible. More particularly, we develop methods to estimate the time of possibly encountering obstacles and the reachable area for a specific pose to enable the prediction of the next reset caused by any user. Our experiments and user study found that our method outperforms existing RDW methods in online VR applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the recent rise of Metaverse, online multiplayer VR applications are becoming increasingly prevalent worldwide. However, as multiple users are located in different physical environments, different reset frequencies and timings can lead to serious fairness issues for online collaborative/competitive VR applications. For the fairness of online VR apps/games, an ideal online RDW strategy must make the locomotion opportunities of different users equal, regardless of different physical environment layouts. The existing RDW methods lack the scheme to coordinate multiple users in different PEs, and thus have the issue of triggering too many resets for all the users under the locomotion fairness constraint. We propose a novel multi-user RDW method that is able to significantly reduce the overall reset number and give users a better immersive experience by providing a fair exploration. Our key idea is to first find out the “bottleneck” user that may cause all users to be reset and estimate the time to reset given the users' next targets, and then redirect all the users to favorable poses during that maximized bottleneck time to ensure the subsequent resets can be postponed as much as possible. More particularly, we develop methods to estimate the time of possibly encountering obstacles and the reachable area for a specific pose to enable the prediction of the next reset caused by any user. Our experiments and user study found that our method outperforms existing RDW methods in online VR applications.",
"title": "Multi-User Redirected Walking in Separate Physical Spaces for Online VR Scenarios",
"normalizedTitle": "Multi-User Redirected Walking in Separate Physical Spaces for Online VR Scenarios",
"fno": "10058042",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Space Vehicles",
"Games",
"Aerospace Electronics",
"Task Analysis",
"Reinforcement Learning",
"Real Time Systems",
"Redirected Walking",
"Multi User",
"Online VR",
"Fairness"
],
"authors": [
{
"givenName": "Sen-Zhe",
"surname": "Xu",
"fullName": "Sen-Zhe Xu",
"affiliation": "YMSC, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jia-Hong",
"surname": "Liu",
"fullName": "Jia-Hong Liu",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Miao",
"surname": "Wang",
"fullName": "Miao Wang",
"affiliation": "School of Computer Science and Engineering, Beihang University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fang-Lue",
"surname": "Zhang",
"fullName": "Fang-Lue Zhang",
"affiliation": "School of Engineering and Computer Science, Victoria University of Wellington, Wellington, New Zealand",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Song-Hai",
"surname": "Zhang",
"fullName": "Song-Hai Zhang",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-11",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446263",
"title": "Mobius Walker: Pitch and Roll Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446263/13bd1gJ1v07",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09733261",
"title": "One-step out-of-place resetting for redirected walking in VR",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09733261/1BENJyPkx5S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a655",
"title": "Optimal Pose Guided Redirected Walking with Pose Score Precomputation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a655/1CJbHdnVzd6",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a922",
"title": "Robust Redirected Walking in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a922/1CJfaCP53nq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09785918",
"title": "Redirected Walking for Exploring Immersive Virtual Spaces with HMD: A Comprehensive Review and Recent Advances",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09785918/1DPaEdHg6KQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09881577",
"title": "Making Resets away from Targets: POI aware Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09881577/1Gv8Ze0xuJG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a758",
"title": "Infinite Virtual Space Exploration Using Space Tiling and Perceivable Reset at Fixed Positions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a758/1JrRneazFCw",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10057483",
"articleId": "1LbFmZlZK24",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10058545",
"articleId": "1LdkjmPPahy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LtR72RvlKM",
"name": "ttg555501-010058042s1-supp1-3251648.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010058042s1-supp1-3251648.mp4",
"extension": "mp4",
"size": "191 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1La0xnHKuAM",
"doi": "10.1109/TVCG.2023.3250488",
"abstract": "Assembly-based furniture with movable parts enables shape and structure reconfiguration, thus supporting multiple functions. Although a few attempts have been made for facilitating the creation of multi-function objects, designing such a multi-function assembly with the existing solutions often requires high imagination of designers. We develop the <italic>Magic Furniture</italic> system for users to easily create such designs simply given multiple cross-category objects. Our system automatically leverages the given objects as references to generate a 3D model with movable boards driven by back-and-forth movement mechanisms. By controlling the states of these mechanisms, a designed multi-function furniture object can be reconfigured to approximate the shapes and functions of the given objects. To ensure the designed furniture easy to transform between different functions, we perform an optimization algorithm to choose a proper number of movable boards and determine their shapes and sizes, following a set of design guidelines. We demonstrate the effectiveness of our system through various multi-function furniture designed with different sets of reference inputs and various movement constraints. We also evaluate the design results through several experiments including comparative and user studies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Assembly-based furniture with movable parts enables shape and structure reconfiguration, thus supporting multiple functions. Although a few attempts have been made for facilitating the creation of multi-function objects, designing such a multi-function assembly with the existing solutions often requires high imagination of designers. We develop the <italic>Magic Furniture</italic> system for users to easily create such designs simply given multiple cross-category objects. Our system automatically leverages the given objects as references to generate a 3D model with movable boards driven by back-and-forth movement mechanisms. By controlling the states of these mechanisms, a designed multi-function furniture object can be reconfigured to approximate the shapes and functions of the given objects. To ensure the designed furniture easy to transform between different functions, we perform an optimization algorithm to choose a proper number of movable boards and determine their shapes and sizes, following a set of design guidelines. We demonstrate the effectiveness of our system through various multi-function furniture designed with different sets of reference inputs and various movement constraints. We also evaluate the design results through several experiments including comparative and user studies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Assembly-based furniture with movable parts enables shape and structure reconfiguration, thus supporting multiple functions. Although a few attempts have been made for facilitating the creation of multi-function objects, designing such a multi-function assembly with the existing solutions often requires high imagination of designers. We develop the Magic Furniture system for users to easily create such designs simply given multiple cross-category objects. Our system automatically leverages the given objects as references to generate a 3D model with movable boards driven by back-and-forth movement mechanisms. By controlling the states of these mechanisms, a designed multi-function furniture object can be reconfigured to approximate the shapes and functions of the given objects. To ensure the designed furniture easy to transform between different functions, we perform an optimization algorithm to choose a proper number of movable boards and determine their shapes and sizes, following a set of design guidelines. We demonstrate the effectiveness of our system through various multi-function furniture designed with different sets of reference inputs and various movement constraints. We also evaluate the design results through several experiments including comparative and user studies.",
"title": "Magic Furniture: Design Paradigm of Multi-function Assembly",
"normalizedTitle": "Magic Furniture: Design Paradigm of Multi-function Assembly",
"fno": "10057010",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Shape",
"Three Dimensional Displays",
"Solid Modeling",
"Urban Areas",
"Rails",
"Merging",
"Media",
"Multi Function Design",
"Shape Reconfiguration",
"Assembly Based Modeling"
],
"authors": [
{
"givenName": "Qiang",
"surname": "Fu",
"fullName": "Qiang Fu",
"affiliation": "School of Digital Media and Design Arts, Beijing University of Posts and Telecommunications, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fan",
"surname": "Zhang",
"fullName": "Fan Zhang",
"affiliation": "School of Digital Media and Design Arts, Beijing University of Posts and Telecommunications, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xueming",
"surname": "Li",
"fullName": "Xueming Li",
"affiliation": "School of Digital Media and Design Arts, Beijing University of Posts and Telecommunications, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongbo",
"surname": "Fu",
"fullName": "Hongbo Fu",
"affiliation": "School of Creative Media, City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a168",
"title": "[POSTER] An Inertial, Magnetic and Vision Based Trusted Pose Estimation for AR and 3D Data Qualification on Long Urban Pedestrian Displacements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a168/12OmNBEYzOs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223170",
"title": "Indexing function-based categories for generic recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223170/12OmNxiKscx",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a279",
"title": "Rapid Assembly Design for Solid Furniture Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a279/12OmNzBOibz",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2013/4992/0/4992a726",
"title": "Automatic 3D Furniture Layout Based on Interactive Evolutionary Computation",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2013/4992a726/12OmNzaQoLr",
"parentPublication": {
"id": "proceedings/cisis/2013/4992/0",
"title": "2013 Seventh International Conference on Complex, Intelligent, and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08039524",
"title": "A Data-Driven Approach for Furniture and Indoor Scene Colorization",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08039524/13rRUy3gn7D",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258459",
"title": "Deep model style: Cross-class style compatibility for 3D furniture within a scene",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258459/17D45WrVg0e",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icid/2021/2065/0/206500a147",
"title": "Intelligent furniture design and function simulation based on3D technology",
"doi": null,
"abstractUrl": "/proceedings-article/icid/2021/206500a147/1AjTBwUSDEA",
"parentPublication": {
"id": "proceedings/icid/2021/2065/0",
"title": "2021 2nd International Conference on Intelligent Design (ICID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a798",
"title": "Moving Soon? Rearranging Furniture using Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a798/1CJcJw3fs2s",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccict/2022/7224/0/722400a488",
"title": "Visualization Of Furniture Model Using Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ccict/2022/722400a488/1HpDS9q0OMo",
"parentPublication": {
"id": "proceedings/ccict/2022/7224/0",
"title": "2022 Fifth International Conference on Computational Intelligence and Communication Technologies (CCICT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2019/2297/0/229700a109",
"title": "Automatic Furniture Layout Based on Functional Area Division",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2019/229700a109/1fHklquet0s",
"parentPublication": {
"id": "proceedings/cw/2019/2297/0",
"title": "2019 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10057127",
"articleId": "1La0wW0rjEs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10057994",
"articleId": "1LbFmG2HHnW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LbFo4ag5by",
"name": "ttg555501-010057010s1-supp1-3250488.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010057010s1-supp1-3250488.mp4",
"extension": "mp4",
"size": "69.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1La0wW0rjEs",
"doi": "10.1109/TVCG.2023.3250166",
"abstract": "Spatial and temporal interactions are central and fundamental in many activities in our world. A common problem faced when visualizing this type of data is how to provide an overview that helps users navigate efficiently. Traditional approaches use coordinated views or 3D metaphors like the Space-time cube to tackle this problem. However, they suffer from overplotting and often lack spatial context, hindering data exploration. More recent techniques, such as <italic>MotionRugs</italic>, propose compact temporal summaries based on 1D projection. While powerful, these techniques do not support the situation for which the spatial extent of the objects and their intersections is relevant, such as the analysis of surveillance videos or tracking weather storms. In this paper, we propose MoReVis, a visual overview of spatiotemporal data that considers the objects' spatial extent and strives to show spatial interactions among these objects by displaying spatial intersections. Like previous techniques, our method involves projecting the spatial coordinates to 1D to produce compact summaries. However, our solution's core consists of performing a layout optimization step that sets the size and positions of the visual marks on the summary to resemble the actual values on the original space. We also provide multiple interactive mechanisms to make interpreting the results more straightforward for the user. We perform an extensive experimental evaluation and usage scenarios. Moreover, we evaluated the usefulness of MoReVis in a study with 9 participants. The results point out the effectiveness and suitability of our method in representing different datasets compared to traditional techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Spatial and temporal interactions are central and fundamental in many activities in our world. A common problem faced when visualizing this type of data is how to provide an overview that helps users navigate efficiently. Traditional approaches use coordinated views or 3D metaphors like the Space-time cube to tackle this problem. However, they suffer from overplotting and often lack spatial context, hindering data exploration. More recent techniques, such as <italic>MotionRugs</italic>, propose compact temporal summaries based on 1D projection. While powerful, these techniques do not support the situation for which the spatial extent of the objects and their intersections is relevant, such as the analysis of surveillance videos or tracking weather storms. In this paper, we propose MoReVis, a visual overview of spatiotemporal data that considers the objects' spatial extent and strives to show spatial interactions among these objects by displaying spatial intersections. Like previous techniques, our method involves projecting the spatial coordinates to 1D to produce compact summaries. However, our solution's core consists of performing a layout optimization step that sets the size and positions of the visual marks on the summary to resemble the actual values on the original space. We also provide multiple interactive mechanisms to make interpreting the results more straightforward for the user. We perform an extensive experimental evaluation and usage scenarios. Moreover, we evaluated the usefulness of MoReVis in a study with 9 participants. The results point out the effectiveness and suitability of our method in representing different datasets compared to traditional techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Spatial and temporal interactions are central and fundamental in many activities in our world. A common problem faced when visualizing this type of data is how to provide an overview that helps users navigate efficiently. Traditional approaches use coordinated views or 3D metaphors like the Space-time cube to tackle this problem. However, they suffer from overplotting and often lack spatial context, hindering data exploration. More recent techniques, such as MotionRugs, propose compact temporal summaries based on 1D projection. While powerful, these techniques do not support the situation for which the spatial extent of the objects and their intersections is relevant, such as the analysis of surveillance videos or tracking weather storms. In this paper, we propose MoReVis, a visual overview of spatiotemporal data that considers the objects' spatial extent and strives to show spatial interactions among these objects by displaying spatial intersections. Like previous techniques, our method involves projecting the spatial coordinates to 1D to produce compact summaries. However, our solution's core consists of performing a layout optimization step that sets the size and positions of the visual marks on the summary to resemble the actual values on the original space. We also provide multiple interactive mechanisms to make interpreting the results more straightforward for the user. We perform an extensive experimental evaluation and usage scenarios. Moreover, we evaluated the usefulness of MoReVis in a study with 9 participants. The results point out the effectiveness and suitability of our method in representing different datasets compared to traditional techniques.",
"title": "MoReVis: A Visual Summary for Spatiotemporal Moving Regions",
"normalizedTitle": "MoReVis: A Visual Summary for Spatiotemporal Moving Regions",
"fno": "10057127",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Trajectory",
"Data Visualization",
"Visualization",
"Videos",
"Three Dimensional Displays",
"Spatiotemporal Phenomena",
"Layout",
"Spatiotemporal Visualization",
"Spatial Interactions",
"Spatial Abstraction"
],
"authors": [
{
"givenName": "Giovani",
"surname": "Valdrighi",
"fullName": "Giovani Valdrighi",
"affiliation": "Fundação Getulio Vargas, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nivan",
"surname": "Ferreira",
"fullName": "Nivan Ferreira",
"affiliation": "Universidade Federal de Pernambuco, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jorge",
"surname": "Poco",
"fullName": "Jorge Poco",
"affiliation": "Fundação Getulio Vargas, Brazil",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icde/2014/2555/0/06816707",
"title": "Interactive hierarchical tag clouds for summarizing spatiotemporal social contents",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2014/06816707/12OmNz6iOxa",
"parentPublication": {
"id": "proceedings/icde/2014/2555/0",
"title": "2014 IEEE 30th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a158",
"title": "Spatiotemporal Saliency Detection via Sparse Representation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a158/12OmNzAoi3b",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etvis/2016/4731/0/07851160",
"title": "Hilbert attention maps for visualizing spatiotemporal gaze data",
"doi": null,
"abstractUrl": "/proceedings-article/etvis/2016/07851160/12OmNzVoBuv",
"parentPublication": {
"id": "proceedings/etvis/2016/4731/0",
"title": "2016 IEEE Second Workshop on Eye Tracking and Visualization (ETVIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101664",
"title": "Compact Video Synopsis via Global Spatiotemporal Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101664/13rRUx0xPII",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08022952",
"title": "Voila: Visual Anomaly Detection and Monitoring with Streaming Spatiotemporal Data",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08022952/13rRUyogGAi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258325",
"title": "Spatiotemporal visualization of traffic paths using color space time curve",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258325/17D45XeKgni",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2019/0990/0/08909874",
"title": "Multi-Component Spatiotemporal Attention and its Application to Object Detection in Surveillance Videos",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2019/08909874/1febKqmL2q4",
"parentPublication": {
"id": "proceedings/avss/2019/0990/0",
"title": "2019 16th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/02/09151354",
"title": "Spatiotemporal Bundle Adjustment for Dynamic 3D Human Reconstruction in the Wild",
"doi": null,
"abstractUrl": "/journal/tp/2022/02/09151354/1lPCkW5UbPG",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a467",
"title": "Spatiotemporal Phenomena Summarization through Static Visual Narratives",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a467/1rSRaNwIpFK",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2021/0019/0/09597460",
"title": "Spatiotemporal Contrastive Learning of Facial Expressions in Videos",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2021/09597460/1yylbtxovaE",
"parentPublication": {
"id": "proceedings/acii/2021/0019/0",
"title": "2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10056593",
"articleId": "1L8lOJ1pSiA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10057010",
"articleId": "1La0xnHKuAM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LdkiuWAQc8",
"name": "ttg555501-010057127s1-supp1-3250166.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010057127s1-supp1-3250166.mp4",
"extension": "mp4",
"size": "207 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1L8lOJ1pSiA",
"doi": "10.1109/TVCG.2023.3243676",
"abstract": "Open-domain question answering (OpenQA) is an essential but challenging task in natural language processing that aims to answer questions in natural language formats on the basis of large-scale unstructured passages. Recent research has taken the performance of benchmark datasets to new heights, especially when these datasets are combined with techniques for machine reading comprehension based on Transformer models. However, as identified through our ongoing collaboration with domain experts and our review of literature, three key challenges limit their further improvement: (i) complex data with multiple long texts, (ii) complex model architecture with multiple modules, and (iii) semantically complex decision process. In this paper, we present VEQA, a visual analytics system that helps experts understand the decision reasons of OpenQA and provides insights into model improvement. The system summarizes the data flow within and between modules in the OpenQA model as the decision process takes place at the summary, instance and candidate levels. Specifically, it guides users through a summary visualization of dataset and module response to explore individual instances with a ranking visualization that incorporates context. Furthermore, VEQA supports fine-grained exploration of the decision flow within a single module through a comparative tree visualization. We demonstrate the effectiveness of VEQA in promoting interpretability and providing insights into model enhancement through a case study and expert evaluation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Open-domain question answering (OpenQA) is an essential but challenging task in natural language processing that aims to answer questions in natural language formats on the basis of large-scale unstructured passages. Recent research has taken the performance of benchmark datasets to new heights, especially when these datasets are combined with techniques for machine reading comprehension based on Transformer models. However, as identified through our ongoing collaboration with domain experts and our review of literature, three key challenges limit their further improvement: (i) complex data with multiple long texts, (ii) complex model architecture with multiple modules, and (iii) semantically complex decision process. In this paper, we present VEQA, a visual analytics system that helps experts understand the decision reasons of OpenQA and provides insights into model improvement. The system summarizes the data flow within and between modules in the OpenQA model as the decision process takes place at the summary, instance and candidate levels. Specifically, it guides users through a summary visualization of dataset and module response to explore individual instances with a ranking visualization that incorporates context. Furthermore, VEQA supports fine-grained exploration of the decision flow within a single module through a comparative tree visualization. We demonstrate the effectiveness of VEQA in promoting interpretability and providing insights into model enhancement through a case study and expert evaluation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Open-domain question answering (OpenQA) is an essential but challenging task in natural language processing that aims to answer questions in natural language formats on the basis of large-scale unstructured passages. Recent research has taken the performance of benchmark datasets to new heights, especially when these datasets are combined with techniques for machine reading comprehension based on Transformer models. However, as identified through our ongoing collaboration with domain experts and our review of literature, three key challenges limit their further improvement: (i) complex data with multiple long texts, (ii) complex model architecture with multiple modules, and (iii) semantically complex decision process. In this paper, we present VEQA, a visual analytics system that helps experts understand the decision reasons of OpenQA and provides insights into model improvement. The system summarizes the data flow within and between modules in the OpenQA model as the decision process takes place at the summary, instance and candidate levels. Specifically, it guides users through a summary visualization of dataset and module response to explore individual instances with a ranking visualization that incorporates context. Furthermore, VEQA supports fine-grained exploration of the decision flow within a single module through a comparative tree visualization. We demonstrate the effectiveness of VEQA in promoting interpretability and providing insights into model enhancement through a case study and expert evaluation.",
"title": "Visual Explanation for Open-domain Question Answering with BERT",
"normalizedTitle": "Visual Explanation for Open-domain Question Answering with BERT",
"fno": "10056593",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Analytical Models",
"Transformers",
"Task Analysis",
"Data Models",
"Visual Analytics",
"Bit Error Rate",
"Semantics",
"Open Domain Question Answering",
"Explainable Machine Learning",
"Visual Analytics"
],
"authors": [
{
"givenName": "Zekai",
"surname": "Shao",
"fullName": "Zekai Shao",
"affiliation": "Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shuran",
"surname": "Sun",
"fullName": "Shuran Sun",
"affiliation": "Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuheng",
"surname": "Zhao",
"fullName": "Yuheng Zhao",
"affiliation": "Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Siyuan",
"surname": "Wang",
"fullName": "Siyuan Wang",
"affiliation": "Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhongyu",
"surname": "Wei",
"fullName": "Zhongyu Wei",
"affiliation": "Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tao",
"surname": "Gui",
"fullName": "Tao Gui",
"affiliation": "Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Cagatay",
"surname": "Turkay",
"fullName": "Cagatay Turkay",
"affiliation": "University of Warwick, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Siming",
"surname": "Chen",
"fullName": "Siming Chen",
"affiliation": "Fudan University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2013/4892/0/4892b485",
"title": "Aperture: An Open Web 2.0 Visualization Framework",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892b485/12OmNCgrD1q",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2008/01/mcg2008010018",
"title": "An Information-Theoretic View of Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2008/01/mcg2008010018/13rRUB6SpRW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585733",
"title": "QSAnglyzer: Visual Analytics for Prismatic Analysis of Question Answering System Evaluations",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585733/17D45VsBU7S",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbk/2021/3858/0/385800a440",
"title": "Query-focused Abstractive Summarization via Question-answering Model",
"doi": null,
"abstractUrl": "/proceedings-article/icbk/2021/385800a440/1A9X0HKkSAg",
"parentPublication": {
"id": "proceedings/icbk/2021/3858/0",
"title": "2021 IEEE International Conference on Big Knowledge (ICBK)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictech/2022/9694/0/969400a355",
"title": "BERT-Based Mixed Question Answering Matching Model",
"doi": null,
"abstractUrl": "/proceedings-article/ictech/2022/969400a355/1FWmoX3KL7O",
"parentPublication": {
"id": "proceedings/ictech/2022/9694/0",
"title": "2022 11th International Conference of Information and Communication Technology (ICTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09910021",
"title": "FlowNL: Asking the Flow Data in Natural Languages",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09910021/1Hcj6hoXqkU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600b155",
"title": "VLC-BERT: Visual Question Answering with Contextualized Commonsense Knowledge",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600b155/1KxVl7qMe3e",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093596",
"title": "BERT Representations for Video Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093596/1jPbmNJkjUk",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09216629",
"title": "A Visual Analytics Approach for Exploratory Causal Analysis: Exploration, Validation, and Applications",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09216629/1nJsGFc8lUY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2021/3931/0/393100a186",
"title": "Investigating the Evolution of Tree Boosting Models with Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2021/393100a186/1tTtslm0K4g",
"parentPublication": {
"id": "proceedings/pacificvis/2021/3931/0",
"title": "2021 IEEE 14th Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10054065",
"articleId": "1L6HPm1LYZO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10057127",
"articleId": "1La0wW0rjEs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1La0x6yr7l6",
"name": "ttg555501-010056593s1-supp1-3243676.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010056593s1-supp1-3243676.mp4",
"extension": "mp4",
"size": "82.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1L6HPm1LYZO",
"doi": "10.1109/TVCG.2023.3248319",
"abstract": "Journalism has become more data-driven and inherently visual in recent years. Photographs, illustrations, infographics, data visualizations, and general images help convey complex topics to a wide audience. The way that visual artifacts influence how readers form an opinion beyond the text is an important issue to research, but there are few works about this topic. In this context, we research the persuasive, emotional and memorable dimensions of data visualizations and illustrations in journalistic storytelling for long-form articles. We conducted a user study and compared the effects which data visualizations and illustrations have on changing attitude towards a presented topic. While visual representations are usually studied along one dimension, in this experimental study, we explore the effects on readers' attitudes along three: persuasion, emotion, and information retention. By comparing different versions of the same article, we observe how attitudes differ based on the visual stimuli present, and how they are perceived when combined. Results indicate that the narrative using only data visualization elicits a stronger emotional impact than illustration-only visual support, as well as a significant change in the initial attitude about the topic. Our findings contribute to a growing body of literature on how visual artifacts may be used to inform and influence public opinion and debate. We present ideas for future work to generalize the results beyond the domain studied, the water crisis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Journalism has become more data-driven and inherently visual in recent years. Photographs, illustrations, infographics, data visualizations, and general images help convey complex topics to a wide audience. The way that visual artifacts influence how readers form an opinion beyond the text is an important issue to research, but there are few works about this topic. In this context, we research the persuasive, emotional and memorable dimensions of data visualizations and illustrations in journalistic storytelling for long-form articles. We conducted a user study and compared the effects which data visualizations and illustrations have on changing attitude towards a presented topic. While visual representations are usually studied along one dimension, in this experimental study, we explore the effects on readers' attitudes along three: persuasion, emotion, and information retention. By comparing different versions of the same article, we observe how attitudes differ based on the visual stimuli present, and how they are perceived when combined. Results indicate that the narrative using only data visualization elicits a stronger emotional impact than illustration-only visual support, as well as a significant change in the initial attitude about the topic. Our findings contribute to a growing body of literature on how visual artifacts may be used to inform and influence public opinion and debate. We present ideas for future work to generalize the results beyond the domain studied, the water crisis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Journalism has become more data-driven and inherently visual in recent years. Photographs, illustrations, infographics, data visualizations, and general images help convey complex topics to a wide audience. The way that visual artifacts influence how readers form an opinion beyond the text is an important issue to research, but there are few works about this topic. In this context, we research the persuasive, emotional and memorable dimensions of data visualizations and illustrations in journalistic storytelling for long-form articles. We conducted a user study and compared the effects which data visualizations and illustrations have on changing attitude towards a presented topic. While visual representations are usually studied along one dimension, in this experimental study, we explore the effects on readers' attitudes along three: persuasion, emotion, and information retention. By comparing different versions of the same article, we observe how attitudes differ based on the visual stimuli present, and how they are perceived when combined. Results indicate that the narrative using only data visualization elicits a stronger emotional impact than illustration-only visual support, as well as a significant change in the initial attitude about the topic. Our findings contribute to a growing body of literature on how visual artifacts may be used to inform and influence public opinion and debate. We present ideas for future work to generalize the results beyond the domain studied, the water crisis.",
"title": "Attitudinal effects of data visualizations and illustrations in data stories",
"normalizedTitle": "Attitudinal effects of data visualizations and illustrations in data stories",
"fno": "10054065",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Journalism",
"Complexity Theory",
"Videos",
"Technological Innovation",
"Task Analysis",
"Data Stories",
"Attitude Change",
"Emotions",
"Quantitative And Qualitative Evaluation"
],
"authors": [
{
"givenName": "Manuela",
"surname": "Garretón",
"fullName": "Manuela Garretón",
"affiliation": "Department of Computer Sciences, Pontificia Universidad Católica de Chile, Chile",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Francesca",
"surname": "Morini",
"fullName": "Francesca Morini",
"affiliation": "Urban Complexity Lab, University of Applied Sciences Potsdam, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pablo",
"surname": "Celhay",
"fullName": "Pablo Celhay",
"affiliation": "School of Government, Pontificia Universidad Católica de Chile, Chile",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marian",
"surname": "Dörk",
"fullName": "Marian Dörk",
"affiliation": "Urban Complexity Lab, University of Applied Sciences Potsdam, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Denis",
"surname": "Parra",
"fullName": "Denis Parra",
"affiliation": "Department of Computer Sciences, Pontificia Universidad Católica de Chile, Chile",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2012/0863/0/06183590",
"title": "Intelligent cutaway illustrations",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2012/06183590/12OmNzn38Sx",
"parentPublication": {
"id": "proceedings/pacificvis/2012/0863/0",
"title": "Visualization Symposium, IEEE Pacific",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017597",
"title": "Data Visualization Saliency Model: A Tool for Evaluating Abstract Data Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017597/13rRUNvyaf6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061139",
"title": "Narrative Visualization: Telling Stories with Data",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061139/13rRUxAAST1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08233127",
"title": "Atom: A Grammar for Unit Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08233127/14H4WLzSYsE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545064",
"title": "Aligning Text and Document Illustrations: Towards Visually Explainable Digital Humanities",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545064/17D45WK5Apx",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904428",
"title": "Photosensitive Accessibility for Interactive Data Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904428/1H1goP5OTrW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09905872",
"title": "Affective Learning Objectives for Communicative Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09905872/1H3ZV2tCxTa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis4good/2022/9421/0/942100a001",
"title": "Can Data Visualizations Change Minds? Identifying Mechanisms of Elaborative Thinking and Persuasion",
"doi": null,
"abstractUrl": "/proceedings-article/vis4good/2022/942100a001/1J2YmKoEzio",
"parentPublication": {
"id": "proceedings/vis4good/2022/9421/0",
"title": "2022 IEEE Workshop on Visualization for Social Good (VIS4Good)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552240",
"title": "Kineticharts: Augmenting Affective Expressiveness of Charts in Data Stories with Animation Design",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552240/1xic12y0QJG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a116",
"title": "Automatic Y-axis Rescaling in Dynamic Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a116/1yXu9LcLCjC",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10052758",
"articleId": "1L1HY1xpNvi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10056593",
"articleId": "1L8lOJ1pSiA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LbFpDp0wNO",
"name": "ttg555501-010054065s1-tvcg-3248319-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010054065s1-tvcg-3248319-mm.zip",
"extension": "zip",
"size": "257 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1L1HY1xpNvi",
"doi": "10.1109/TVCG.2023.3248632",
"abstract": "In many scientific endeavors, increasingly abstract representations of data allow for new interpretive methodologies and conceptualization of phenomena. For example, moving from raw imaged pixels to segmented and reconstructed objects allows researchers new insights and means to direct their studies toward relevant areas. Thus, the development of new and improved methods for segmentation remains an active area of research. With advances in machine learning and neural networks, scientists have been focused on employing deep neural networks such as U-Net to obtain pixel-level segmentations, namely, defining associations between pixels and corresponding/referent objects and gathering those objects afterward. Topological analysis, such as the use of the Morse-Smale complex to encode regions of uniform gradient flow behavior, offers an alternative approach: first, create geometric priors, and then apply machine learning to classify. This approach is empirically motivated since phenomena of interest often appear as subsets of topological priors in many applications. Using topological elements not only reduces the learning space but also introduces the ability to use learnable geometries and connectivity to aid the classification of the segmentation target. In this paper, we describe an approach to creating learnable topological elements, explore the application of ML techniques to classification tasks in a number of areas, and demonstrate this approach as a viable alternative to pixel-level classification, with similar accuracy, improved execution time, and requiring marginal training data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In many scientific endeavors, increasingly abstract representations of data allow for new interpretive methodologies and conceptualization of phenomena. For example, moving from raw imaged pixels to segmented and reconstructed objects allows researchers new insights and means to direct their studies toward relevant areas. Thus, the development of new and improved methods for segmentation remains an active area of research. With advances in machine learning and neural networks, scientists have been focused on employing deep neural networks such as U-Net to obtain pixel-level segmentations, namely, defining associations between pixels and corresponding/referent objects and gathering those objects afterward. Topological analysis, such as the use of the Morse-Smale complex to encode regions of uniform gradient flow behavior, offers an alternative approach: first, create geometric priors, and then apply machine learning to classify. This approach is empirically motivated since phenomena of interest often appear as subsets of topological priors in many applications. Using topological elements not only reduces the learning space but also introduces the ability to use learnable geometries and connectivity to aid the classification of the segmentation target. In this paper, we describe an approach to creating learnable topological elements, explore the application of ML techniques to classification tasks in a number of areas, and demonstrate this approach as a viable alternative to pixel-level classification, with similar accuracy, improved execution time, and requiring marginal training data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In many scientific endeavors, increasingly abstract representations of data allow for new interpretive methodologies and conceptualization of phenomena. For example, moving from raw imaged pixels to segmented and reconstructed objects allows researchers new insights and means to direct their studies toward relevant areas. Thus, the development of new and improved methods for segmentation remains an active area of research. With advances in machine learning and neural networks, scientists have been focused on employing deep neural networks such as U-Net to obtain pixel-level segmentations, namely, defining associations between pixels and corresponding/referent objects and gathering those objects afterward. Topological analysis, such as the use of the Morse-Smale complex to encode regions of uniform gradient flow behavior, offers an alternative approach: first, create geometric priors, and then apply machine learning to classify. This approach is empirically motivated since phenomena of interest often appear as subsets of topological priors in many applications. Using topological elements not only reduces the learning space but also introduces the ability to use learnable geometries and connectivity to aid the classification of the segmentation target. In this paper, we describe an approach to creating learnable topological elements, explore the application of ML techniques to classification tasks in a number of areas, and demonstrate this approach as a viable alternative to pixel-level classification, with similar accuracy, improved execution time, and requiring marginal training data.",
"title": "Exploring Classification of Topological Priors with Machine Learning for Feature Extraction",
"normalizedTitle": "Exploring Classification of Topological Priors with Machine Learning for Feature Extraction",
"fno": "10052758",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Image Segmentation",
"Machine Learning",
"Semantics",
"Task Analysis",
"Labeling",
"Training",
"Topology",
"Computational Topology",
"Topological Data Analysis",
"Machine Learning",
"Graph Learning",
"Graph Neural Networks",
"Morse Smale Complex",
"Scientific Visualization",
"Segmentation",
"Feature Detection"
],
"authors": [
{
"givenName": "Samuel",
"surname": "Leventhal",
"fullName": "Samuel Leventhal",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah School of Computing, Salt Lake City, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Attila",
"surname": "Gyulassy",
"fullName": "Attila Gyulassy",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah School of Computing, Salt Lake City, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mark",
"surname": "Heimann",
"fullName": "Mark Heimann",
"affiliation": "Lawrence Livermore National Laboratory, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Valerio",
"surname": "Pascucci",
"fullName": "Valerio Pascucci",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah School of Computing, Salt Lake City, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bibe/2007/1509/0/04375736",
"title": "Exploring Topological Properties of NMR Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2007/04375736/12OmNCbCrPb",
"parentPublication": {
"id": "proceedings/bibe/2007/1509/0",
"title": "7th IEEE International Conference on Bioinformatics and Bioengineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1991/2148/0/00139731",
"title": "Topological segmentation of discrete surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1991/00139731/12OmNvrdI6q",
"parentPublication": {
"id": "proceedings/cvpr/1991/2148/0",
"title": "Proceedings. 1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840c328",
"title": "Proportion Priors for Image Sequence Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840c328/12OmNxdm4wa",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a015",
"title": "Proximity Priors for Variational Semantic Segmentation and Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a015/12OmNzDvSh9",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2006/08/i1262",
"title": "Dynamical Statistical Shape Priors for Level Set-Based Tracking",
"doi": null,
"abstractUrl": "/journal/tp/2006/08/i1262/13rRUxBrGi0",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/06/07944591",
"title": "Incorporating Network Built-in Priors in Weakly-Supervised Semantic Segmentation",
"doi": null,
"abstractUrl": "/journal/tp/2018/06/07944591/13rRUxDqS9E",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09672024",
"title": "Combining Geometric and Topological Information for Boundary Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09672024/1A8h20c2IzC",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300b715",
"title": "Topological Map Extraction From Overhead Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300b715/1hVlkVtuWfS",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/09/09321177",
"title": "Fast 3D Indoor Scene Synthesis by Learning Spatial Relation Priors of Objects",
"doi": null,
"abstractUrl": "/journal/tg/2022/09/09321177/1qkwF6Uf61y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10049688",
"articleId": "1KYoraK6mLm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10054065",
"articleId": "1L6HPm1LYZO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KYoAYFd0m4",
"doi": "10.1109/TVCG.2023.3244359",
"abstract": "Redirected walking (RDW) and omnidirectional treadmill (ODT) are two effective solutions to the natural locomotion interface in virtual reality. ODT fully compresses the physical space and can be used as the integration carrier of all kinds of devices. However, the user experience varies in different directions of ODT, and the premise of interaction between users and integrated devices is a good match between virtual and real objects. RDW technology uses visual cues to guide the user's location in physical space. Based on this principle, combining RDW technology with ODT to guide the user's walking direction through visual cues can effectively improve user experience on ODT and make full use of various devices integrated on ODT. This paper explores the novel prospects of combining RDW technology with ODT and formally puts forward the concept of O-RDW (ODT-based RDW). Two baseline algorithms, i.e., OS2MD (ODT-based steer to multi-direction), and OS2MT (ODT-based steer to multi-target), are proposed to combine the merits of both RDW and ODT. With the help of the simulation environment, this paper quantitatively analyzes the applicable scenarios of the two algorithms and the influence of several main factors on the performance. Based on the conclusions of the simulation experiments, the two O-RDW algorithms are successfully applied in the practical application case of multi-target haptic feedback. Combined with the user study, the practicability and effectiveness of O-RDW technology in practical use are further verified.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking (RDW) and omnidirectional treadmill (ODT) are two effective solutions to the natural locomotion interface in virtual reality. ODT fully compresses the physical space and can be used as the integration carrier of all kinds of devices. However, the user experience varies in different directions of ODT, and the premise of interaction between users and integrated devices is a good match between virtual and real objects. RDW technology uses visual cues to guide the user's location in physical space. Based on this principle, combining RDW technology with ODT to guide the user's walking direction through visual cues can effectively improve user experience on ODT and make full use of various devices integrated on ODT. This paper explores the novel prospects of combining RDW technology with ODT and formally puts forward the concept of O-RDW (ODT-based RDW). Two baseline algorithms, i.e., OS2MD (ODT-based steer to multi-direction), and OS2MT (ODT-based steer to multi-target), are proposed to combine the merits of both RDW and ODT. With the help of the simulation environment, this paper quantitatively analyzes the applicable scenarios of the two algorithms and the influence of several main factors on the performance. Based on the conclusions of the simulation experiments, the two O-RDW algorithms are successfully applied in the practical application case of multi-target haptic feedback. Combined with the user study, the practicability and effectiveness of O-RDW technology in practical use are further verified.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking (RDW) and omnidirectional treadmill (ODT) are two effective solutions to the natural locomotion interface in virtual reality. ODT fully compresses the physical space and can be used as the integration carrier of all kinds of devices. However, the user experience varies in different directions of ODT, and the premise of interaction between users and integrated devices is a good match between virtual and real objects. RDW technology uses visual cues to guide the user's location in physical space. Based on this principle, combining RDW technology with ODT to guide the user's walking direction through visual cues can effectively improve user experience on ODT and make full use of various devices integrated on ODT. This paper explores the novel prospects of combining RDW technology with ODT and formally puts forward the concept of O-RDW (ODT-based RDW). Two baseline algorithms, i.e., OS2MD (ODT-based steer to multi-direction), and OS2MT (ODT-based steer to multi-target), are proposed to combine the merits of both RDW and ODT. With the help of the simulation environment, this paper quantitatively analyzes the applicable scenarios of the two algorithms and the influence of several main factors on the performance. Based on the conclusions of the simulation experiments, the two O-RDW algorithms are successfully applied in the practical application case of multi-target haptic feedback. Combined with the user study, the practicability and effectiveness of O-RDW technology in practical use are further verified.",
"title": "Redirected Walking On Omnidirectional Treadmill",
"normalizedTitle": "Redirected Walking On Omnidirectional Treadmill",
"fno": "10049511",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Haptic Interfaces",
"Visualization",
"User Experience",
"Resists",
"Thermal Stability",
"Stability Analysis",
"Redirected Walking",
"Omnidirectional Treadmill",
"Haptic Feedback",
"Device Integration",
"Locomotion Interfaces"
],
"authors": [
{
"givenName": "Ziyao",
"surname": "Wang",
"fullName": "Ziyao Wang",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yiye",
"surname": "Wang",
"fullName": "Yiye Wang",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shiqi",
"surname": "Yan",
"fullName": "Shiqi Yan",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhongzheng",
"surname": "Zhu",
"fullName": "Zhongzheng Zhu",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "KanJian",
"surname": "Zhang",
"fullName": "KanJian Zhang",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haikun",
"surname": "Wei",
"fullName": "Haikun Wei",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/04/ttg201404579",
"title": "Performance of Redirected Walking Algorithms in a Constrained Virtual World",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404579/13rRUwjoNx4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645699",
"title": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645699/17PYElBjW00",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a922",
"title": "Robust Redirected Walking in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a922/1CJfaCP53nq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798231",
"title": "The Effect of Hanger Reflex on Virtual Reality Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798231/1cJ0KBrAUYE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797818",
"title": "Effects of Tracking Area Shape and Size on Artificial Potential Field Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797818/1cJ1htJ7ArK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998570",
"title": "A Steering Algorithm for Redirected Walking Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998570/1hx2DxYanDy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089532",
"title": "Optimal Planning for Redirected Walking Based on Reinforcement Learning in Multi-user Environment with Irregularly Shaped Physical Space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089532/1jIx7m6wYKc",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a201",
"title": "Evaluate Optimal Redirected Walking Planning Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a201/1pBMkbxS3F6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a184",
"title": "A Reinforcement Learning Approach to Redirected Walking with Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a184/1yeCXhKVTXy",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10048575",
"articleId": "1KQ5KN76WNq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10049688",
"articleId": "1KYoraK6mLm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1L03bKknib6",
"name": "ttg555501-010049511s1-supp4-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp4-3244359.mp4",
"extension": "mp4",
"size": "71.2 MB",
"__typename": "WebExtraType"
},
{
"id": "1L03cJBKccE",
"name": "ttg555501-010049511s1-supp5-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp5-3244359.mp4",
"extension": "mp4",
"size": "67.3 MB",
"__typename": "WebExtraType"
},
{
"id": "1L03akJ8tmE",
"name": "ttg555501-010049511s1-supp1-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp1-3244359.mp4",
"extension": "mp4",
"size": "8.45 MB",
"__typename": "WebExtraType"
},
{
"id": "1L03fapzrFK",
"name": "ttg555501-010049511s1-supp2-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp2-3244359.mp4",
"extension": "mp4",
"size": "117 MB",
"__typename": "WebExtraType"
},
{
"id": "1L03aVPc8lq",
"name": "ttg555501-010049511s1-supp3-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp3-3244359.mp4",
"extension": "mp4",
"size": "69.4 MB",
"__typename": "WebExtraType"
},
{
"id": "1L03e7qhshW",
"name": "ttg555501-010049511s1-supp6-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp6-3244359.mp4",
"extension": "mp4",
"size": "73.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KYoraK6mLm",
"doi": "10.1109/TVCG.2023.3247169",
"abstract": "The registration of unitary-modality geometric data has been successfully explored over past decades. However, existing approaches typically struggle to handle cross-modality data due to the <italic>intrinsic difference</italic> between different models. To address this problem, in this paper, we formulate the cross-modality registration problem as a <italic>consistent clustering process</italic>. First, we study the structure similarity between different modalities based on an <italic>adaptive</italic> fuzzy shape clustering, from which a coarse alignment is successfully operated. Then, we optimize the result using fuzzy clustering consistently, in which the source and target models are formulated as <italic>clustering memberships</italic> and <italic>centroids</italic>, respectively. This optimization casts new insight into point set registration, and substantially improves the robustness against outliers. Additionally, we investigate the effect of <italic>fuzzier</italic> in fuzzy clustering on the cross-modality registration problem, from which we theoretically prove that the classical Iterative Closest Point (ICP) algorithm is a special case of our newly defined objective function. Comprehensive experiments and analysis are conducted on both synthetic and real-world cross-modality datasets. Qualitative and quantitative results demonstrate that our method outperforms state-of-the-art approaches with higher accuracy and robustness. Our code is publicly available at <uri>https://github.com/zikai1/CrossModReg</uri>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The registration of unitary-modality geometric data has been successfully explored over past decades. However, existing approaches typically struggle to handle cross-modality data due to the <italic>intrinsic difference</italic> between different models. To address this problem, in this paper, we formulate the cross-modality registration problem as a <italic>consistent clustering process</italic>. First, we study the structure similarity between different modalities based on an <italic>adaptive</italic> fuzzy shape clustering, from which a coarse alignment is successfully operated. Then, we optimize the result using fuzzy clustering consistently, in which the source and target models are formulated as <italic>clustering memberships</italic> and <italic>centroids</italic>, respectively. This optimization casts new insight into point set registration, and substantially improves the robustness against outliers. Additionally, we investigate the effect of <italic>fuzzier</italic> in fuzzy clustering on the cross-modality registration problem, from which we theoretically prove that the classical Iterative Closest Point (ICP) algorithm is a special case of our newly defined objective function. Comprehensive experiments and analysis are conducted on both synthetic and real-world cross-modality datasets. Qualitative and quantitative results demonstrate that our method outperforms state-of-the-art approaches with higher accuracy and robustness. Our code is publicly available at <uri>https://github.com/zikai1/CrossModReg</uri>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The registration of unitary-modality geometric data has been successfully explored over past decades. However, existing approaches typically struggle to handle cross-modality data due to the intrinsic difference between different models. To address this problem, in this paper, we formulate the cross-modality registration problem as a consistent clustering process. First, we study the structure similarity between different modalities based on an adaptive fuzzy shape clustering, from which a coarse alignment is successfully operated. Then, we optimize the result using fuzzy clustering consistently, in which the source and target models are formulated as clustering memberships and centroids, respectively. This optimization casts new insight into point set registration, and substantially improves the robustness against outliers. Additionally, we investigate the effect of fuzzier in fuzzy clustering on the cross-modality registration problem, from which we theoretically prove that the classical Iterative Closest Point (ICP) algorithm is a special case of our newly defined objective function. Comprehensive experiments and analysis are conducted on both synthetic and real-world cross-modality datasets. Qualitative and quantitative results demonstrate that our method outperforms state-of-the-art approaches with higher accuracy and robustness. Our code is publicly available at https://github.com/zikai1/CrossModReg.",
"title": "Accurate Registration of Cross-Modality Geometry via Consistent Clustering",
"normalizedTitle": "Accurate Registration of Cross-Modality Geometry via Consistent Clustering",
"fno": "10049688",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Point Cloud Compression",
"Geometry",
"Three Dimensional Displays",
"Laser Radar",
"Tensors",
"Clustering Algorithms",
"Solid Modeling",
"Cross Modality Geometry",
"Point Cloud Registration",
"3 D Reconstruction",
"Adaptive Fuzzy Clustering",
"CAD"
],
"authors": [
{
"givenName": "Mingyang",
"surname": "Zhao",
"fullName": "Mingyang Zhao",
"affiliation": "Beijing Academy of Artificial Intelligence and the National Laboratory of Pattern Recognition (NLPR), Institute of Automation, CAS, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaoshui",
"surname": "Huang",
"fullName": "Xiaoshui Huang",
"affiliation": "Shanghai AI Laboratory, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jingen",
"surname": "Jiang",
"fullName": "Jingen Jiang",
"affiliation": "State Key Laboratory of Multimodal Artificial Intelligence Systems (MAIS) and NLPR, Institute of Automation, Chinese Academy of Sciences, and the School of AI, UCAS, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Luntian",
"surname": "Mou",
"fullName": "Luntian Mou",
"affiliation": "Beijing Key Laboratory of Multimedia and Intelligent Software Technology, Beijing Institute of Artificial Intelligence, Beijing University of Technology, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dong-Ming",
"surname": "Yan",
"fullName": "Dong-Ming Yan",
"affiliation": "State Key Laboratory of Multimodal Artificial Intelligence Systems (MAIS) and NLPR, Institute of Automation, Chinese Academy of Sciences, and the School of AI, UCAS, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lei",
"surname": "Ma",
"fullName": "Lei Ma",
"affiliation": "Beijing Academy of Artificial Intelligence, the Institute for Artificial Intelligence and the National Biomedical Imaging Center, Peking University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/aipr/2010/8833/0/05759694",
"title": "Automated cross-sensor registration, orthorectification and geopositioning using LIDAR digital elevation models",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2010/05759694/12OmNzb7ZpV",
"parentPublication": {
"id": "proceedings/aipr/2010/8833/0",
"title": "2010 IEEE 39th Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200p5994",
"title": "HRegNet: A Hierarchical Network for Large-scale Outdoor LiDAR Point Cloud Registration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5994/1BmFeO4ChZC",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200p5273",
"title": "LSG-CPD: Coherent Point Drift with Local Surface Geometry for Point Cloud Registration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5273/1BmIQQNgI12",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/citce/2021/2184/0/218400a001",
"title": "Improved Iterative Closest Point (ICP) Point Cloud Registration Algorithm based on Matching Point Pair Quadratic Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/citce/2021/218400a001/1BtfTK26ZHO",
"parentPublication": {
"id": "proceedings/citce/2021/2184/0",
"title": "2021 International Conference on Computer, Internet of Things and Control Engineering (CITCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10091912",
"title": "QGORE: Quadratic-Time Guaranteed Outlier Removal for Point Cloud Registration",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10091912/1M4mYahKC4g",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10097640",
"title": "Sparse-to-Dense Matching Network for Large-scale LiDAR Point Cloud Registration",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10097640/1M9lILSRgL6",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c365",
"title": "Robust Multi-Modality Multi-Object Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c365/1hQqgSo8zJe",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09445585",
"title": "Consistent Two-Flow Network for Tele-Registration of Point Clouds",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09445585/1u8lzpSvnxu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2021/4121/0/412100a510",
"title": "FIRE: Unsupervised bi-directional inter- and intra-modality registration using deep networks",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2021/412100a510/1vb8VXU1o5i",
"parentPublication": {
"id": "proceedings/cbms/2021/4121/0",
"title": "2021 IEEE 34th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900p5955",
"title": "DeepI2P: Image-to-Point Cloud Registration via Deep Classification",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900p5955/1yeLi36Hp84",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10049511",
"articleId": "1KYoAYFd0m4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10052758",
"articleId": "1L1HY1xpNvi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LKs6y1gEiQ",
"name": "ttg555501-010049688s1-supp1-3247169.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049688s1-supp1-3247169.pdf",
"extension": "pdf",
"size": "10.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KQ5KN76WNq",
"doi": "10.1109/TVCG.2023.3246092",
"abstract": "When humans generate stimuli voluntarily, they perceive the stimuli more weakly than those produced by others, which is called sensory attenuation (SA). SA has been investigated in various body parts, but it is unclear whether an extended body induces SA. This study investigated the SA of audio stimuli generated by an extended body. SA was assessed using a sound comparison task in a virtual environment. We prepared robotic arms as extended bodies, and the robotic arms were controlled by facial movements. To evaluate the SA of robotic arms, we conducted two experiments. Experiment 1 investigated the SA of the robotic arms under four conditions. The results showed that robotic arms manipulated by voluntary actions attenuated audio stimuli. Experiment 2 investigated the SA of the robotic arm and innate body under five conditions. The results indicated that the innate body and robotic arm induced SA, while there were differences in the sense of agency between the innate body and robotic arm. Analysis of the results indicated three findings regarding the SA of the extended body. First, controlling the robotic arm with voluntary actions in a virtual environment attenuates the audio stimuli. Second, there were differences in the sense of agency related to SA between extended and innate bodies. Third, the SA of the robotic arm was correlated with the sense of body ownership.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When humans generate stimuli voluntarily, they perceive the stimuli more weakly than those produced by others, which is called sensory attenuation (SA). SA has been investigated in various body parts, but it is unclear whether an extended body induces SA. This study investigated the SA of audio stimuli generated by an extended body. SA was assessed using a sound comparison task in a virtual environment. We prepared robotic arms as extended bodies, and the robotic arms were controlled by facial movements. To evaluate the SA of robotic arms, we conducted two experiments. Experiment 1 investigated the SA of the robotic arms under four conditions. The results showed that robotic arms manipulated by voluntary actions attenuated audio stimuli. Experiment 2 investigated the SA of the robotic arm and innate body under five conditions. The results indicated that the innate body and robotic arm induced SA, while there were differences in the sense of agency between the innate body and robotic arm. Analysis of the results indicated three findings regarding the SA of the extended body. First, controlling the robotic arm with voluntary actions in a virtual environment attenuates the audio stimuli. Second, there were differences in the sense of agency related to SA between extended and innate bodies. Third, the SA of the robotic arm was correlated with the sense of body ownership.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When humans generate stimuli voluntarily, they perceive the stimuli more weakly than those produced by others, which is called sensory attenuation (SA). SA has been investigated in various body parts, but it is unclear whether an extended body induces SA. This study investigated the SA of audio stimuli generated by an extended body. SA was assessed using a sound comparison task in a virtual environment. We prepared robotic arms as extended bodies, and the robotic arms were controlled by facial movements. To evaluate the SA of robotic arms, we conducted two experiments. Experiment 1 investigated the SA of the robotic arms under four conditions. The results showed that robotic arms manipulated by voluntary actions attenuated audio stimuli. Experiment 2 investigated the SA of the robotic arm and innate body under five conditions. The results indicated that the innate body and robotic arm induced SA, while there were differences in the sense of agency between the innate body and robotic arm. Analysis of the results indicated three findings regarding the SA of the extended body. First, controlling the robotic arm with voluntary actions in a virtual environment attenuates the audio stimuli. Second, there were differences in the sense of agency related to SA between extended and innate bodies. Third, the SA of the robotic arm was correlated with the sense of body ownership.",
"title": "Sensory Attenuation with a Virtual Robotic Arm Controlled Using Facial Movements",
"normalizedTitle": "Sensory Attenuation with a Virtual Robotic Arm Controlled Using Facial Movements",
"fno": "10048575",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Manipulators",
"Robot Sensing Systems",
"Rubber",
"Cognition",
"Virtual Environments",
"Task Analysis",
"Standards",
"Sensory Attenuation",
"Human Augmentation",
"Robotic Arm",
"Virtual Reality"
],
"authors": [
{
"givenName": "Masaaki",
"surname": "Fukuoka",
"fullName": "Masaaki Fukuoka",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fumihiko",
"surname": "Nakamura",
"fullName": "Fumihiko Nakamura",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Adrien",
"surname": "Verhulst",
"fullName": "Adrien Verhulst",
"affiliation": "Sony Computer Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Masahiko",
"surname": "Inami",
"fullName": "Masahiko Inami",
"affiliation": "Department of Advanced Interdisciplinary Studies, The University of Tokyo, Tokyo, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michiteru",
"surname": "Kitazaki",
"fullName": "Michiteru Kitazaki",
"affiliation": "Department of Computer Science and Engineering, Toyohashi University of Technology, Toyohashi, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maki",
"surname": "Sugimoto",
"fullName": "Maki Sugimoto",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/tiiec/2013/5146/0/5146a277",
"title": "Robotic Arm Movements Wirelessly Synchronized with Human Arm Movements Using Real Time Image Processing",
"doi": null,
"abstractUrl": "/proceedings-article/tiiec/2013/5146a277/12OmNCcbEaU",
"parentPublication": {
"id": "proceedings/tiiec/2013/5146/0",
"title": "2013 Texas Instruments India Educators' Conference (TIIEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gtsd/2016/3638/0/3638a022",
"title": "Design of a Controlled Robotic Arm",
"doi": null,
"abstractUrl": "/proceedings-article/gtsd/2016/3638a022/12OmNqyUUuq",
"parentPublication": {
"id": "proceedings/gtsd/2016/3638/0",
"title": "2016 3rd International Conference on Green Technology and Sustainable Development (GTSD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsn/2022/1693/0/169300a101",
"title": "Arming IDS Researchers with a Robotic Arm Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/dsn/2022/169300a101/1Fixffj8YA8",
"parentPublication": {
"id": "proceedings/dsn/2022/1693/0",
"title": "2022 52nd Annual IEEE/IFIP International Conference on Dependable Systems and Networks (DSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a772",
"title": "Embodiment of an Avatar with Unnatural Arm Movements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798351",
"title": "Parasitic Body: Exploring Perspective Dependency in a Shared Body with a Third Arm",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798351/1cJ0W5YaAtq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifeea/2020/9627/0/962700a806",
"title": "Simple and Reliable: Mechanics Analysis for Robotic Arm of Strawberry Picking Robot",
"doi": null,
"abstractUrl": "/proceedings-article/ifeea/2020/962700a806/1rvCEsBQNJC",
"parentPublication": {
"id": "proceedings/ifeea/2020/9627/0",
"title": "2020 7th International Forum on Electrical Engineering and Automation (IFEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichci/2020/2316/0/231600a426",
"title": "Analysis on Deep Reinforcement Learning in Industrial Robotic Arm",
"doi": null,
"abstractUrl": "/proceedings-article/ichci/2020/231600a426/1tuAe9IdpbW",
"parentPublication": {
"id": "proceedings/ichci/2020/2316/0",
"title": "2020 International Conference on Intelligent Computing and Human-Computer Interaction (ICHCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2020/7624/0/762400b729",
"title": "Automated Testing of Mobile Applications Using a Robotic Arm",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2020/762400b729/1uGZ1Sr8Yj6",
"parentPublication": {
"id": "proceedings/csci/2020/7624/0",
"title": "2020 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2021/4261/0/09635229",
"title": "A Long short-term Memory Model based on a Robotic Wheelchair as a Rehabilitation Assistant",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2021/09635229/1zmvoIkJTGw",
"parentPublication": {
"id": "proceedings/bibe/2021/4261/0",
"title": "2021 IEEE 21st International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeas/2021/9768/0/976800a178",
"title": "Preliminary Concept Modelling, Evaluation and Selection of Robotic Arm for Light Lifting Application",
"doi": null,
"abstractUrl": "/proceedings-article/icmeas/2021/976800a178/1zuuTHF68FO",
"parentPublication": {
"id": "proceedings/icmeas/2021/9768/0",
"title": "2021 7th International Conference on Mechanical Engineering and Automation Science (ICMEAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10045805",
"articleId": "1KOqKkmxUUU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10049511",
"articleId": "1KYoAYFd0m4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KOqKyuerbW",
"doi": "10.1109/TVCG.2023.3245609",
"abstract": "Delivering customer services through video communications has brought new opportunities to analyze customer satisfaction for quality management. However, due to the lack of reliable self-reported responses, service providers are troubled by the inadequate estimation of customer services and the tedious investigation into multimodal video recordings. We introduce <italic>Anchorage</italic>, a visual analytics system to evaluate customer satisfaction by summarizing multimodal behavioral features in customer service videos and revealing abnormal operations in the service process. We leverage the semantically meaningful operations to introduce structured event understanding into videos which help service providers quickly navigate to events of their interest. <italic>Anchorage</italic> supports a comprehensive evaluation of customer satisfaction from the service and operation levels and efficient analysis of customer behavioral dynamics via multifaceted visualization views. We extensively evaluate <italic>Anchorage</italic> through a case study and a carefully-designed user study. The results demonstrate its effectiveness and usability in assessing customer satisfaction using customer service videos. We found that introducing event contexts in assessing customer satisfaction can enhance its performance without compromising annotation precision. Our approach can be adapted in situations where unlabelled and unstructured videos are collected along with sequential records.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Delivering customer services through video communications has brought new opportunities to analyze customer satisfaction for quality management. However, due to the lack of reliable self-reported responses, service providers are troubled by the inadequate estimation of customer services and the tedious investigation into multimodal video recordings. We introduce <italic>Anchorage</italic>, a visual analytics system to evaluate customer satisfaction by summarizing multimodal behavioral features in customer service videos and revealing abnormal operations in the service process. We leverage the semantically meaningful operations to introduce structured event understanding into videos which help service providers quickly navigate to events of their interest. <italic>Anchorage</italic> supports a comprehensive evaluation of customer satisfaction from the service and operation levels and efficient analysis of customer behavioral dynamics via multifaceted visualization views. We extensively evaluate <italic>Anchorage</italic> through a case study and a carefully-designed user study. The results demonstrate its effectiveness and usability in assessing customer satisfaction using customer service videos. We found that introducing event contexts in assessing customer satisfaction can enhance its performance without compromising annotation precision. Our approach can be adapted in situations where unlabelled and unstructured videos are collected along with sequential records.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Delivering customer services through video communications has brought new opportunities to analyze customer satisfaction for quality management. However, due to the lack of reliable self-reported responses, service providers are troubled by the inadequate estimation of customer services and the tedious investigation into multimodal video recordings. We introduce Anchorage, a visual analytics system to evaluate customer satisfaction by summarizing multimodal behavioral features in customer service videos and revealing abnormal operations in the service process. We leverage the semantically meaningful operations to introduce structured event understanding into videos which help service providers quickly navigate to events of their interest. Anchorage supports a comprehensive evaluation of customer satisfaction from the service and operation levels and efficient analysis of customer behavioral dynamics via multifaceted visualization views. We extensively evaluate Anchorage through a case study and a carefully-designed user study. The results demonstrate its effectiveness and usability in assessing customer satisfaction using customer service videos. We found that introducing event contexts in assessing customer satisfaction can enhance its performance without compromising annotation precision. Our approach can be adapted in situations where unlabelled and unstructured videos are collected along with sequential records.",
"title": "Anchorage: Visual Analysis of Satisfaction in Customer Service Videos Via Anchor Events",
"normalizedTitle": "Anchorage: Visual Analysis of Satisfaction in Customer Service Videos Via Anchor Events",
"fno": "10045801",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Videos",
"Customer Services",
"Behavioral Sciences",
"Customer Satisfaction",
"Visual Analytics",
"Visualization",
"Data Visualization",
"Customer Satisfaction",
"Video Data",
"Video Visualization",
"Visual Analytics"
],
"authors": [
{
"givenName": "Kam Kwai",
"surname": "Wong",
"fullName": "Kam Kwai Wong",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xingbo",
"surname": "Wang",
"fullName": "Xingbo Wang",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong",
"surname": "Wang",
"fullName": "Yong Wang",
"affiliation": "Singapore Management University, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jianben",
"surname": "He",
"fullName": "Jianben He",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rong",
"surname": "Zhang",
"fullName": "Rong Zhang",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iciii/2011/4523/3/4523c161",
"title": "Customer Satisfaction Evaluation in Retail Businesses",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2011/4523c161/12OmNqESugJ",
"parentPublication": {
"id": "proceedings/iciii/2011/4523/3",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmb/2009/3691/0/3691a115",
"title": "Customer Satisfaction and Loyalty of Mobile Services",
"doi": null,
"abstractUrl": "/proceedings-article/icmb/2009/3691a115/12OmNqJHFrp",
"parentPublication": {
"id": "proceedings/icmb/2009/3691/0",
"title": "2009 Eighth International Conference on Mobile Business, ICMB",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isbim/2008/3560/2/3560b295",
"title": "Research of Customer Satisfaction Optimal Model Based on Business CRM",
"doi": null,
"abstractUrl": "/proceedings-article/isbim/2008/3560b295/12OmNrAdsEc",
"parentPublication": {
"id": "proceedings/isbim/2008/3560/2",
"title": "Business and Information Management, International Seminar on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icee/2010/3997/0/3997d111",
"title": "Fuzzy Comprehensive Evaluation Model of Customer Satisfaction Degree",
"doi": null,
"abstractUrl": "/proceedings-article/icee/2010/3997d111/12OmNrIJqqU",
"parentPublication": {
"id": "proceedings/icee/2010/3997/0",
"title": "International Conference on E-Business and E-Government",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iitaw/2009/3860/0/3860a431",
"title": "Application of Unascertained Measurement Model in Customer Satisfaction Measurement",
"doi": null,
"abstractUrl": "/proceedings-article/iitaw/2009/3860a431/12OmNrMZpnJ",
"parentPublication": {
"id": "proceedings/iitaw/2009/3860/0",
"title": "2009 Third International Symposium on Intelligent Information Technology Application Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciii/2011/4523/2/4523b190",
"title": "The Impact of Information and Information System Satisfaction on Customer Satisfaction under E-commerce",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2011/4523b190/12OmNx5Yvhe",
"parentPublication": {
"id": "proceedings/iciii/2011/4523/2",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssme/2009/3729/0/3729a136",
"title": "Empirical Study on Electric Power Customer Satisfaction Based on Kano Model",
"doi": null,
"abstractUrl": "/proceedings-article/ssme/2009/3729a136/12OmNxisQNx",
"parentPublication": {
"id": "proceedings/ssme/2009/3729/0",
"title": "2009 IITA International Conference on Services Science, Management and Engineering (SSME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asia/2009/3910/0/3910a191",
"title": "B2B E-commerce Website Customer Satisfaction: A Formula and Scale",
"doi": null,
"abstractUrl": "/proceedings-article/asia/2009/3910a191/12OmNyRPgvB",
"parentPublication": {
"id": "proceedings/asia/2009/3910/0",
"title": "2009 International Asia Symposium on Intelligent Interaction and Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ijcss/2012/4731/0/4731a115",
"title": "Does Back-Office Employee Satisfaction Affect Customer Satisfaction? An Empirical Examination",
"doi": null,
"abstractUrl": "/proceedings-article/ijcss/2012/4731a115/12OmNybfr9O",
"parentPublication": {
"id": "proceedings/ijcss/2012/4731/0",
"title": "Service Sciences, International Joint Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2010/3987/2/3987b399",
"title": "Research on Relationship of Customer Satisfaction in Chinese Higher Education",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2010/3987b399/12OmNzSQdm8",
"parentPublication": {
"id": "proceedings/etcs/2010/3987/2",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10045018",
"articleId": "1KMLV0zTt1m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10045805",
"articleId": "1KOqKkmxUUU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KOqKkmxUUU",
"doi": "10.1109/TVCG.2023.3245583",
"abstract": "We propose a robust and highly realistic clothing modeling method to generate a 3D clothing model with visually consistent clothing style and wrinkles distribution from a single RGB image. Notably, this entire process only takes a few seconds. Our high-quality clothing results benefit from the idea of combining learning and optimization, making it highly robust. First, we use the neural networks to predict the normal map, a clothing mask, and a learning-based clothing model from input images. The predicted normal map can effectively capture high-frequency clothing deformation from image observations. Then, by introducing a normal-guided clothing fitting optimization, the normal maps are used to guide the clothing model to generate realistic wrinkles details. Finally, we utilize a clothing collar adjustment strategy to stylize clothing results using predicted clothing masks. An extended multi-view version of the clothing fitting is naturally developed, which can further improve the realism of the clothing without tedious effort. Extensive experiments have proven that our method achieves state-of-the-art clothing geometric accuracy and visual realism. More importantly, it is highly adaptable and robust to in-the-wild images. Further, our method can be easily extended to multi-view inputs to improve realism. In summary, our method can provide a low-cost and user-friendly solution to achieve realistic clothing modeling.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a robust and highly realistic clothing modeling method to generate a 3D clothing model with visually consistent clothing style and wrinkles distribution from a single RGB image. Notably, this entire process only takes a few seconds. Our high-quality clothing results benefit from the idea of combining learning and optimization, making it highly robust. First, we use the neural networks to predict the normal map, a clothing mask, and a learning-based clothing model from input images. The predicted normal map can effectively capture high-frequency clothing deformation from image observations. Then, by introducing a normal-guided clothing fitting optimization, the normal maps are used to guide the clothing model to generate realistic wrinkles details. Finally, we utilize a clothing collar adjustment strategy to stylize clothing results using predicted clothing masks. An extended multi-view version of the clothing fitting is naturally developed, which can further improve the realism of the clothing without tedious effort. Extensive experiments have proven that our method achieves state-of-the-art clothing geometric accuracy and visual realism. More importantly, it is highly adaptable and robust to in-the-wild images. Further, our method can be easily extended to multi-view inputs to improve realism. In summary, our method can provide a low-cost and user-friendly solution to achieve realistic clothing modeling.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a robust and highly realistic clothing modeling method to generate a 3D clothing model with visually consistent clothing style and wrinkles distribution from a single RGB image. Notably, this entire process only takes a few seconds. Our high-quality clothing results benefit from the idea of combining learning and optimization, making it highly robust. First, we use the neural networks to predict the normal map, a clothing mask, and a learning-based clothing model from input images. The predicted normal map can effectively capture high-frequency clothing deformation from image observations. Then, by introducing a normal-guided clothing fitting optimization, the normal maps are used to guide the clothing model to generate realistic wrinkles details. Finally, we utilize a clothing collar adjustment strategy to stylize clothing results using predicted clothing masks. An extended multi-view version of the clothing fitting is naturally developed, which can further improve the realism of the clothing without tedious effort. Extensive experiments have proven that our method achieves state-of-the-art clothing geometric accuracy and visual realism. More importantly, it is highly adaptable and robust to in-the-wild images. Further, our method can be easily extended to multi-view inputs to improve realism. In summary, our method can provide a low-cost and user-friendly solution to achieve realistic clothing modeling.",
"title": "Modeling Realistic Clothing from a Single Image under Normal Guide",
"normalizedTitle": "Modeling Realistic Clothing from a Single Image under Normal Guide",
"fno": "10045805",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Clothing",
"Deformation",
"Computational Modeling",
"Shape",
"Image Reconstruction",
"Fitting",
"Three Dimensional Displays",
"Clothing Modeling",
"Realistic Wrinkles Distribution",
"Stylized Cloth",
"Normal Map",
"Clothing Mask",
"Single Image"
],
"authors": [
{
"givenName": "Xinqi",
"surname": "Liu",
"fullName": "Xinqi Liu",
"affiliation": "Institute of Design Engineering, School of Mechanical Engineering, Zhejiang University, Hangzhou, PR China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jituo",
"surname": "Li",
"fullName": "Jituo Li",
"affiliation": "Institute of Design Engineering, School of Mechanical Engineering, Zhejiang University, Hangzhou, PR China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guodong",
"surname": "Lu",
"fullName": "Guodong Lu",
"affiliation": "Institute of Design Engineering, School of Mechanical Engineering, Zhejiang University, Hangzhou, PR China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032a853",
"title": "A Generative Model of People in Clothing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a853/12OmNCwlakU",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09869633",
"title": "Learning to Infer Inner-Body Under Clothing From Monocular Video",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09869633/1GeVHT5oVtC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956089",
"title": "FitGAN: Fit- and Shape-Realistic Generative Adversarial Networks for Fashion",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956089/1IHqz3SBHIk",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c293",
"title": "Tex2Shape: Detailed Full Human Body Geometry From a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c293/1hQqm60gQVy",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0510",
"title": "VTNFP: An Image-Based Virtual Try-On Network With Body and Clothing Feature Preservation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0510/1hVlSD4rLA4",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h363",
"title": "TailorNet: Predicting Clothing in 3D as a Function of Human Pose, Shape and Garment Style",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h363/1m3nnD97pZu",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800g468",
"title": "Learning to Dress 3D People in Generative Clothing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800g468/1m3nwUHFD68",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a322",
"title": "MonoClothCap: Towards Temporally Coherent Clothing Capture from Monocular RGB Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a322/1qyxk1bcV5S",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412908",
"title": "GarmentGAN: Photo-realistic Adversarial Fashion Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412908/1tmjfl4UquQ",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aemcse/2021/1596/0/159600a677",
"title": "High-precision 3D face normal map generation",
"doi": null,
"abstractUrl": "/proceedings-article/aemcse/2021/159600a677/1wcdaMerkQg",
"parentPublication": {
"id": "proceedings/aemcse/2021/1596/0",
"title": "2021 4th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10045801",
"articleId": "1KOqKyuerbW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10048575",
"articleId": "1KQ5KN76WNq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KQ5Lb9CCOc",
"name": "ttg555501-010045805s1-supp1-3245583.docx",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010045805s1-supp1-3245583.docx",
"extension": "docx",
"size": "30.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KMLV0zTt1m",
"doi": "10.1109/TVCG.2023.3245305",
"abstract": "Neural networks have shown great success in extracting geometric information from color images. Especially, monocular depth estimation networks are increasingly reliable in real-world scenes. In this work we investigate the applicability of such monocular depth estimation networks to semi-transparent volume rendered images. As <italic>depth</italic> is notoriously difficult to define in a volumetric scene without clearly defined surfaces, we consider different depth computations that have emerged in practice, and compare state-of-the-art monocular depth estimation approaches for these different interpretations during an evaluation considering different degrees of opacity in the renderings. Additionally, we investigate how these networks can be extended to further obtain color and opacity information, in order to create a layered representation of the scene based on a single color image. This layered representation consists of spatially separated semi-transparent intervals that composite to the original input rendering. In our experiments we show that existing approaches to monocular depth estimation can be adapted to perform well on semi-transparent volume renderings, which has several applications in the area of scientific visualization, like re-composition with additional objects and labels or additional shading.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Neural networks have shown great success in extracting geometric information from color images. Especially, monocular depth estimation networks are increasingly reliable in real-world scenes. In this work we investigate the applicability of such monocular depth estimation networks to semi-transparent volume rendered images. As <italic>depth</italic> is notoriously difficult to define in a volumetric scene without clearly defined surfaces, we consider different depth computations that have emerged in practice, and compare state-of-the-art monocular depth estimation approaches for these different interpretations during an evaluation considering different degrees of opacity in the renderings. Additionally, we investigate how these networks can be extended to further obtain color and opacity information, in order to create a layered representation of the scene based on a single color image. This layered representation consists of spatially separated semi-transparent intervals that composite to the original input rendering. In our experiments we show that existing approaches to monocular depth estimation can be adapted to perform well on semi-transparent volume renderings, which has several applications in the area of scientific visualization, like re-composition with additional objects and labels or additional shading.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Neural networks have shown great success in extracting geometric information from color images. Especially, monocular depth estimation networks are increasingly reliable in real-world scenes. In this work we investigate the applicability of such monocular depth estimation networks to semi-transparent volume rendered images. As depth is notoriously difficult to define in a volumetric scene without clearly defined surfaces, we consider different depth computations that have emerged in practice, and compare state-of-the-art monocular depth estimation approaches for these different interpretations during an evaluation considering different degrees of opacity in the renderings. Additionally, we investigate how these networks can be extended to further obtain color and opacity information, in order to create a layered representation of the scene based on a single color image. This layered representation consists of spatially separated semi-transparent intervals that composite to the original input rendering. In our experiments we show that existing approaches to monocular depth estimation can be adapted to perform well on semi-transparent volume renderings, which has several applications in the area of scientific visualization, like re-composition with additional objects and labels or additional shading.",
"title": "Monocular Depth Decomposition of Semi-Transparent Volume Renderings",
"normalizedTitle": "Monocular Depth Decomposition of Semi-Transparent Volume Renderings",
"fno": "10045018",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Estimation",
"Image Color Analysis",
"Solid Modeling",
"Geometry",
"Data Visualization",
"Neural Networks",
"Volume Rendering",
"Depth Compositing",
"Monocular Depth Estimation"
],
"authors": [
{
"givenName": "Dominik",
"surname": "Engel",
"fullName": "Dominik Engel",
"affiliation": "Visual Computing Group, Ulm University, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sebastian",
"surname": "Hartwig",
"fullName": "Sebastian Hartwig",
"affiliation": "Visual Computing Group, Ulm University, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Timo",
"surname": "Ropinski",
"fullName": "Timo Ropinski",
"affiliation": "Visual Computing Group, Ulm University, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457c215",
"title": "Semi-Supervised Deep Learning for Monocular Depth Map Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457c215/12OmNAWH9HO",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06814989",
"title": "Screen-Space Ambient Occlusion Using A-Buffer Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06814989/12OmNAs2tqk",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2013/5099/0/5099a123",
"title": "Explorable Volumetric Depth Images from Raycasting",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2013/5099a123/12OmNwBT1oL",
"parentPublication": {
"id": "proceedings/sibgrapi/2013/5099/0",
"title": "2013 XXVI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000g393",
"title": "Aperture Supervision for Monocular Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000g393/17D45WIXbNB",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956576",
"title": "Joint Self-Supervised Monocular Depth Estimation and SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956576/1IHpbIpwRfW",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a242",
"title": "MonoNHR: Monocular Neural Human Renderer",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a242/1KYsx7s4siQ",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08807270",
"title": "Semi-Supervised Adversarial Monocular Depth Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08807270/1cG61HuI6v6",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2020/9891/0/09108693",
"title": "Depth Prediction for Monocular Direct Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2020/09108693/1kpIGiAFaYo",
"parentPublication": {
"id": "proceedings/crv/2020/9891/0",
"title": "2020 17th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a761",
"title": "Parameterization of Ambiguity in Monocular Depth Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a761/1zWEbnpmKas",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a648",
"title": "PanoDepth: A Two-Stage Approach for Monocular Omnidirectional Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a648/1zWEhPbXso8",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10044277",
"articleId": "1KL728MHdtu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10045801",
"articleId": "1KOqKyuerbW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KOqOaVPxSM",
"name": "ttg555501-010045018s1-supp1-3245305.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010045018s1-supp1-3245305.pdf",
"extension": "pdf",
"size": "11.4 MB",
"__typename": "WebExtraType"
},
{
"id": "1KOqNHhjEoU",
"name": "ttg555501-010045018s1-supp2-3245305.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010045018s1-supp2-3245305.mp4",
"extension": "mp4",
"size": "13.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KL728MHdtu",
"doi": "10.1109/TVCG.2023.3244679",
"abstract": "Gaze behavior of virtual characters in video games and virtual reality experiences is a key factor of realism and immersion. Indeed, gaze plays many roles when interacting with the environment; not only does it indicate what characters are looking at, but it also plays an important role in verbal and non-verbal behaviors and in making virtual characters alive. Automated computing of gaze behaviors is however a challenging problem, and to date none of the existing methods are capable of producing close-to-real results in an interactive context. We therefore propose a novel method that leverages recent advances in several distinct areas related to visual saliency, attention mechanisms, saccadic behavior modelling, and head-gaze animation techniques. Our approach articulates these advances to converge on a multi-map saliency-driven model which offers real-time realistic gaze behaviors for non-conversational characters, together with additional user-control over customizable features to compose a wide variety of results. We first evaluate the benefits of our approach through an objective evaluation that confronts our gaze simulation with ground truth data using an eye-tracking dataset specifically acquired for this purpose. We then rely on subjective evaluation to measure the level of realism of gaze animations generated by our method, in comparison with gaze animations captured from real actors. Our results show that our method generates gaze behaviors that cannot be distinguished from captured gaze animations. Overall, we believe that these results will open the way for more natural and intuitive design of realistic and coherent gaze animations for real-time applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Gaze behavior of virtual characters in video games and virtual reality experiences is a key factor of realism and immersion. Indeed, gaze plays many roles when interacting with the environment; not only does it indicate what characters are looking at, but it also plays an important role in verbal and non-verbal behaviors and in making virtual characters alive. Automated computing of gaze behaviors is however a challenging problem, and to date none of the existing methods are capable of producing close-to-real results in an interactive context. We therefore propose a novel method that leverages recent advances in several distinct areas related to visual saliency, attention mechanisms, saccadic behavior modelling, and head-gaze animation techniques. Our approach articulates these advances to converge on a multi-map saliency-driven model which offers real-time realistic gaze behaviors for non-conversational characters, together with additional user-control over customizable features to compose a wide variety of results. We first evaluate the benefits of our approach through an objective evaluation that confronts our gaze simulation with ground truth data using an eye-tracking dataset specifically acquired for this purpose. We then rely on subjective evaluation to measure the level of realism of gaze animations generated by our method, in comparison with gaze animations captured from real actors. Our results show that our method generates gaze behaviors that cannot be distinguished from captured gaze animations. Overall, we believe that these results will open the way for more natural and intuitive design of realistic and coherent gaze animations for real-time applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Gaze behavior of virtual characters in video games and virtual reality experiences is a key factor of realism and immersion. Indeed, gaze plays many roles when interacting with the environment; not only does it indicate what characters are looking at, but it also plays an important role in verbal and non-verbal behaviors and in making virtual characters alive. Automated computing of gaze behaviors is however a challenging problem, and to date none of the existing methods are capable of producing close-to-real results in an interactive context. We therefore propose a novel method that leverages recent advances in several distinct areas related to visual saliency, attention mechanisms, saccadic behavior modelling, and head-gaze animation techniques. Our approach articulates these advances to converge on a multi-map saliency-driven model which offers real-time realistic gaze behaviors for non-conversational characters, together with additional user-control over customizable features to compose a wide variety of results. We first evaluate the benefits of our approach through an objective evaluation that confronts our gaze simulation with ground truth data using an eye-tracking dataset specifically acquired for this purpose. We then rely on subjective evaluation to measure the level of realism of gaze animations generated by our method, in comparison with gaze animations captured from real actors. Our results show that our method generates gaze behaviors that cannot be distinguished from captured gaze animations. Overall, we believe that these results will open the way for more natural and intuitive design of realistic and coherent gaze animations for real-time applications.",
"title": "Real-time Multi-map Saliency-driven Gaze Behavior for Non-conversational Characters",
"normalizedTitle": "Real-time Multi-map Saliency-driven Gaze Behavior for Non-conversational Characters",
"fno": "10044277",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Behavioral Sciences",
"Animation",
"Visualization",
"Solid Modeling",
"Real Time Systems",
"Biological System Modeling",
"Head",
"Gaze Behavior",
"Simulation",
"Animation",
"Neural Networks",
"Eye Tracking Data",
"Dataset"
],
"authors": [
{
"givenName": "Ific",
"surname": "Goudé",
"fullName": "Ific Goudé",
"affiliation": "Inria, Univ Rennes, CNRS, IRISA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexandre",
"surname": "Bruckert",
"fullName": "Alexandre Bruckert",
"affiliation": "Nantes Université, École Centrale Nantes, CNRS, LS2N, UMR 6004, Nantes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anne-Hélène",
"surname": "Olivier",
"fullName": "Anne-Hélène Olivier",
"affiliation": "Inria, Univ Rennes, CNRS, IRISA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Julien",
"surname": "Pettré",
"fullName": "Julien Pettré",
"affiliation": "Inria, Univ Rennes, CNRS, IRISA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rémi",
"surname": "Cozot",
"fullName": "Rémi Cozot",
"affiliation": "Littoral Opal Coast University, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kadi",
"surname": "Bouatouch",
"fullName": "Kadi Bouatouch",
"affiliation": "Inria, Univ Rennes, CNRS, IRISA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marc",
"surname": "Christie",
"fullName": "Marc Christie",
"affiliation": "Inria, Univ Rennes, CNRS, IRISA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ludovic",
"surname": "Hoyet",
"fullName": "Ludovic Hoyet",
"affiliation": "Inria, Univ Rennes, CNRS, IRISA, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vs-games/2017/5812/0/08056604",
"title": "Expressive virtual characters for social demonstration games",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2017/08056604/12OmNBpmDNS",
"parentPublication": {
"id": "proceedings/vs-games/2017/5812/0",
"title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349573",
"title": "Relations between facial display, eye gaze and head tilt: Dominance perception variations of virtual agents",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349573/12OmNxHJ9qY",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2010/6331/0/05460171",
"title": "Animating Gaze Shifts for Virtual Characters Based on Head Movement Propensity",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2010/05460171/12OmNyKa67Q",
"parentPublication": {
"id": "proceedings/vs-games/2010/6331/0",
"title": "2010 2nd International Conference on Games and Virtual Worlds for Serious Applications (VS-GAMES 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446364",
"title": "Empirical Evaluation of Virtual Human Conversational and Affective Animations on Visual Attention in Inter-Personal Simulations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446364/13bd1hyoTxR",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2010/04/mcg2010040062",
"title": "The Expressive Gaze Model: Using Gaze to Express Emotion",
"doi": null,
"abstractUrl": "/magazine/cg/2010/04/mcg2010040062/13rRUy0ZzSc",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ds-rt/2008/3425/0/04700111",
"title": "Communicating Eye Gaze across a Distance without Rooting Participants to the Spot",
"doi": null,
"abstractUrl": "/proceedings-article/ds-rt/2008/04700111/17D45XtvpaL",
"parentPublication": {
"id": "proceedings/ds-rt/2008/3425/0",
"title": "Distributed Simulation and Real Time Applications, IEEE/ACM International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200d722",
"title": "Looking here or there? Gaze Following in 360-Degree Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d722/1BmL9UhfI88",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09872121",
        "title": "From real infrared eye-images to synthetic sequences of gaze behavior",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09872121/1GhRV18KGvC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300b169",
"title": "SalGaze: Personalizing Gaze Estimation using Visual Saliency",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300b169/1i5mshouNby",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089573",
"title": "Effects of Interacting with a Crowd of Emotional Virtual Humans on Users’ Affective and Non-Verbal Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089573/1jIxfPwklig",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10043789",
"articleId": "1KJsjepjFi8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10045018",
"articleId": "1KMLV0zTt1m",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1L8lPodO7Pa",
"name": "ttg555501-010044277s1-supp1-3244679.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010044277s1-supp1-3244679.mp4",
"extension": "mp4",
"size": "133 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KJsjepjFi8",
"doi": "10.1109/TVCG.2023.3240503",
"abstract": "Designing thin-shell structures that are diverse, lightweight, and physically viable is a challenging task for traditional heuristic methods. To address this challenge, we present a novel parametric design framework for engraving regular, irregular, and customized patterns on thin-shell structures. Our method optimizes pattern parameters such as size and orientation, to ensure structural stiffness while minimizing material consumption. Our method is unique in that it works directly with shapes and patterns represented by functions, and can engrave patterns through simple function operations. By eliminating the need for remeshing in traditional FEM methods, our method is more computationally efficient in optimizing mechanical properties and can significantly increase the diversity of shell structure design. Quantitative evaluation confirms the convergence of the proposed method. We conduct experiments on regular, irregular, and customized patterns and present 3D printed results to demonstrate the effectiveness of our approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Designing thin-shell structures that are diverse, lightweight, and physically viable is a challenging task for traditional heuristic methods. To address this challenge, we present a novel parametric design framework for engraving regular, irregular, and customized patterns on thin-shell structures. Our method optimizes pattern parameters such as size and orientation, to ensure structural stiffness while minimizing material consumption. Our method is unique in that it works directly with shapes and patterns represented by functions, and can engrave patterns through simple function operations. By eliminating the need for remeshing in traditional FEM methods, our method is more computationally efficient in optimizing mechanical properties and can significantly increase the diversity of shell structure design. Quantitative evaluation confirms the convergence of the proposed method. We conduct experiments on regular, irregular, and customized patterns and present 3D printed results to demonstrate the effectiveness of our approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Designing thin-shell structures that are diverse, lightweight, and physically viable is a challenging task for traditional heuristic methods. To address this challenge, we present a novel parametric design framework for engraving regular, irregular, and customized patterns on thin-shell structures. Our method optimizes pattern parameters such as size and orientation, to ensure structural stiffness while minimizing material consumption. Our method is unique in that it works directly with shapes and patterns represented by functions, and can engrave patterns through simple function operations. By eliminating the need for remeshing in traditional FEM methods, our method is more computationally efficient in optimizing mechanical properties and can significantly increase the diversity of shell structure design. Quantitative evaluation confirms the convergence of the proposed method. We conduct experiments on regular, irregular, and customized patterns and present 3D printed results to demonstrate the effectiveness of our approach.",
"title": "A Parametric Design Method for Engraving Patterns on Thin Shells",
"normalizedTitle": "A Parametric Design Method for Engraving Patterns on Thin Shells",
"fno": "10043789",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Optimization",
"Shape",
"Topology",
"Design Methodology",
"Computational Modeling",
"Solid Modeling",
"Three Dimensional Displays",
"Parametric Design",
"Pattern Engraving",
"Structural Optimization",
"Thin Shells"
],
"authors": [
{
"givenName": "Jiangbei",
"surname": "Hu",
"fullName": "Jiangbei Hu",
"affiliation": "DUT-RU International School of Information and Software Engineering and Key Laboratory for Ubiquitous Network and Service Software of Liaoning Province, Dalian University of Technology, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shengfa",
"surname": "Wang",
"fullName": "Shengfa Wang",
"affiliation": "DUT-RU International School of Information and Software Engineering, Dalian University of Technology, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ying",
"surname": "He",
"fullName": "Ying He",
"affiliation": "School of Computer Science and Engineering, Nanyang Technological University, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhongxuan",
"surname": "Luo",
"fullName": "Zhongxuan Luo",
"affiliation": "School of Software Technology, Dalian University of Technology, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Na",
"surname": "Lei",
"fullName": "Na Lei",
"affiliation": "DUT-RU International School of Information and Software Engineering, Dalian University of Technology, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ligang",
"surname": "Liu",
"fullName": "Ligang Liu",
            "affiliation": "School of Mathematical Sciences, University of Science and Technology of China, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2015/1986/0/1986a214",
"title": "A Method for Global Non-rigid Registration of Multiple Thin Structures",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2015/1986a214/12OmNAtK4kg",
"parentPublication": {
"id": "proceedings/crv/2015/1986/0",
"title": "2015 12th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2015/8020/0/07450393",
"title": "Automatic Hierarchical Mid-surface Abstraction of Thin-Walled Models Based on Rib Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450393/12OmNBTs7oo",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391b671",
"title": "Shell PCA: Statistical Shape Modelling in Shell Space",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b671/12OmNC2xhAr",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1991/2190/0/00138591",
"title": "Scattering and radiation from cylindrical anisotropic shells",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1991/00138591/12OmNvnwVqK",
"parentPublication": {
"id": "proceedings/ssst/1991/2190/0",
"title": "The Twenty-Third Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a037",
"title": "Automatic Recovery of Networks of Thin Structures",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a037/12OmNwJgAIr",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2015/7143/0/7143b294",
"title": "The Study of Electromagnetic Signal Visualization Method Applied to the Whole World",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2015/7143b294/12OmNxXUhTb",
"parentPublication": {
"id": "proceedings/icmtma/2015/7143/0",
"title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ewdts/2017/3299/0/08110086",
"title": "Modeling of developable surfaces of three-dimensional geometric objects",
"doi": null,
"abstractUrl": "/proceedings-article/ewdts/2017/08110086/12OmNzV70zC",
"parentPublication": {
"id": "proceedings/ewdts/2017/3299/0",
"title": "2017 IEEE East-West Design & Test Symposium (EWDTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/03/v0375",
"title": "Meshless Thin-Shell Simulation Based on Global Conformal Parameterization",
"doi": null,
"abstractUrl": "/journal/tg/2006/03/v0375/13rRUxZ0o1m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09928368",
"title": "Large Growth Deformations of Thin Tissue Using Solid-Shells",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09928368/1HJuKaJzi36",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ewdts/2019/1003/0/08884456",
"title": "Surface visualization of flexible elastic shells",
"doi": null,
"abstractUrl": "/proceedings-article/ewdts/2019/08884456/1eEUXmGJ5jq",
"parentPublication": {
"id": "proceedings/ewdts/2019/1003/0",
"title": "2019 IEEE East-West Design & Test Symposium (EWDTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10041940",
"articleId": "1KEtpYenAVW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10044277",
"articleId": "1KL728MHdtu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KEtpYenAVW",
"doi": "10.1109/TVCG.2023.3244076",
"abstract": "Haptic exoskeleton gloves are a widespread solution for providing force-feedback in Virtual Reality (VR), especially for 3D object manipulations. However, they are still lacking an important feature regarding in-hand haptic sensations: the palmar contact. In this paper, we present PalmEx, a novel approach which incorporates palmar force-feedback into exoskeleton gloves to improve the overall grasping sensations and manual haptic interactions in VR. PalmEx’s concept is demonstrated through a self-contained hardware system augmenting a hand exoskeleton with an encountered palmar contact interface – physically encountering the users’ palm. We build upon current taxonomies to elicit PalmEx’s capabilities for both the exploration and manipulation of virtual objects. We first conduct a technical evaluation optimising the delay between the virtual interactions and their physical counterparts. We then empirically evaluate PalmEx’s proposed design space in a user study (n=12) to assess the potential of a palmar contact for augmenting an exoskeleton. Results show that PalmEx offers the best rendering capabilities to perform believable grasps in VR. PalmEx highlights the importance of the palmar stimulation, and provides a low-cost solution to augment existing high-end consumer hand exoskeletons.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Haptic exoskeleton gloves are a widespread solution for providing force-feedback in Virtual Reality (VR), especially for 3D object manipulations. However, they are still lacking an important feature regarding in-hand haptic sensations: the palmar contact. In this paper, we present PalmEx, a novel approach which incorporates palmar force-feedback into exoskeleton gloves to improve the overall grasping sensations and manual haptic interactions in VR. PalmEx’s concept is demonstrated through a self-contained hardware system augmenting a hand exoskeleton with an encountered palmar contact interface – physically encountering the users’ palm. We build upon current taxonomies to elicit PalmEx’s capabilities for both the exploration and manipulation of virtual objects. We first conduct a technical evaluation optimising the delay between the virtual interactions and their physical counterparts. We then empirically evaluate PalmEx’s proposed design space in a user study (n=12) to assess the potential of a palmar contact for augmenting an exoskeleton. Results show that PalmEx offers the best rendering capabilities to perform believable grasps in VR. PalmEx highlights the importance of the palmar stimulation, and provides a low-cost solution to augment existing high-end consumer hand exoskeletons.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Haptic exoskeleton gloves are a widespread solution for providing force-feedback in Virtual Reality (VR), especially for 3D object manipulations. However, they are still lacking an important feature regarding in-hand haptic sensations: the palmar contact. In this paper, we present PalmEx, a novel approach which incorporates palmar force-feedback into exoskeleton gloves to improve the overall grasping sensations and manual haptic interactions in VR. PalmEx’s concept is demonstrated through a self-contained hardware system augmenting a hand exoskeleton with an encountered palmar contact interface – physically encountering the users’ palm. We build upon current taxonomies to elicit PalmEx’s capabilities for both the exploration and manipulation of virtual objects. We first conduct a technical evaluation optimising the delay between the virtual interactions and their physical counterparts. We then empirically evaluate PalmEx’s proposed design space in a user study (n=12) to assess the potential of a palmar contact for augmenting an exoskeleton. Results show that PalmEx offers the best rendering capabilities to perform believable grasps in VR. PalmEx highlights the importance of the palmar stimulation, and provides a low-cost solution to augment existing high-end consumer hand exoskeletons.",
"title": "PalmEx: Adding Palmar Force-Feedback for 3D Manipulation with Haptic Exoskeleton Gloves",
"normalizedTitle": "PalmEx: Adding Palmar Force-Feedback for 3D Manipulation with Haptic Exoskeleton Gloves",
"fno": "10041940",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Exoskeletons",
"Haptic Interfaces",
"Three Dimensional Displays",
"Taxonomy",
"Training",
"Wearable Computers",
"Avatars",
"Haptics",
"Virtual Reality",
"Artefact",
"Exoskeleton",
"ETHD",
"Encountered Type Of Haptic Device",
"On Demand"
],
"authors": [
{
"givenName": "Elodie",
"surname": "Bouzbib",
"fullName": "Elodie Bouzbib",
"affiliation": "Inria, CNRS, Univ Rennes, IRISA – Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marc",
"surname": "Teyssier",
"fullName": "Marc Teyssier",
"affiliation": "Léonard de Vinci Pôle Universitaire, Research Center – Paris La Défense, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thomas",
"surname": "Howard",
"fullName": "Thomas Howard",
"affiliation": "CNRS, Univ Rennes, Inria, IRISA – Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Claudio",
"surname": "Pacchierotti",
"fullName": "Claudio Pacchierotti",
"affiliation": "CNRS, Univ Rennes, Inria, IRISA – Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anatole",
"surname": "Lécuyer",
"fullName": "Anatole Lécuyer",
"affiliation": "Inria, CNRS, Univ Rennes, IRISA – Rennes, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-8",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/haptic/2006/0226/0/01627127",
"title": "Performance Enhancement of a Haptic Arm Exoskeleton",
"doi": null,
"abstractUrl": "/proceedings-article/haptic/2006/01627127/12OmNA1DMm0",
"parentPublication": {
"id": "proceedings/haptic/2006/0226/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260058",
"title": "Performance Enhancement of a Haptic Arm Exoskeleton",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260058/12OmNscxj93",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2005/2310/0/23100195",
"title": "A New Force-Feedback Arm Exoskeleton for Haptic Interaction in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2005/23100195/12OmNzRqdE2",
"parentPublication": {
"id": "proceedings/whc/2005/2310/0",
"title": "Proceedings. First Joint Eurohaptics Conference and Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems. World Haptics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699269",
"title": "Designing Haptics: Comparing Two Virtual Reality Gloves with Respect to Realism, Performance and Comfort",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699269/19F1OMxa4Kc",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a504",
"title": "Supporting Playful Rehabilitation in the Home using Virtual Reality Headsets and Force Feedback Gloves",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a504/1CJceq2Bdvi",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2022/6814/0/681400a155",
"title": "MagGlove: A Haptic Glove with Movable Magnetic Force for Manipulation Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2022/681400a155/1I6RMpw0Hgk",
"parentPublication": {
"id": "proceedings/cw/2022/6814/0",
"title": "2022 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2022/9476/0/947600a162",
"title": "A Wearable Soft Exoskeleton for Shoulder Motion Assistance",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2022/947600a162/1JjykWViHvi",
"parentPublication": {
"id": "proceedings/chase/2022/9476/0",
"title": "2022 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090641",
"title": "Docking Haptics: Dynamic Combinations Of Grounded And Worn Devices",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090641/1jIxtkAPJi8",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2020/9574/0/957400a830",
"title": "Vision-Based Autonomous Walking in a Lower-Limb Powered Exoskeleton",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2020/957400a830/1pBMn9swlOM",
"parentPublication": {
"id": "proceedings/bibe/2020/9574/0",
"title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/05/09535175",
"title": "Propping Up Virtual Reality With Haptic Proxies",
"doi": null,
"abstractUrl": "/magazine/cg/2021/05/09535175/1wMEVevVsg8",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10041994",
"articleId": "1KEtoFykgN2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10043789",
"articleId": "1KJsjepjFi8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KJsjWls2Bi",
"name": "ttg555501-010041940s1-supp1-3244076.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010041940s1-supp1-3244076.mp4",
"extension": "mp4",
"size": "104 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KEtoFykgN2",
"doi": "10.1109/TVCG.2023.3243834",
"abstract": "We present a hybrid machine learning and flow analysis feature detection method, RipViz, to extract rip currents from stationary videos. Rip currents are dangerous strong currents that can drag beachgoers out to sea. Most people are either unaware of them or do not know what they look like. In some instances, even trained personnel such as lifeguards have difficulty identifying them. RipViz produces a simple, easy to understand visualization of rip location overlaid on the source video. With RipViz, we first obtain an unsteady 2D vector field from the stationary video using optical flow. Movement at each pixel is analyzed over time. At each seed point, sequences of short pathlines, rather a single long pathline, are traced across the frames of the video to better capture the quasi-periodic flow behavior of wave activity. Because of the motion on the beach, the surf zone, and the surrounding areas, these pathlines may still appear very cluttered and incomprehensible. Furthermore, lay audiences are not familiar with pathlines and may not know how to interpret them. To address this, we treat rip currents as a flow anomaly in an otherwise normal flow. To learn about the normal flow behavior, we train an LSTM autoencoder with pathline sequences from normal ocean, foreground, and background movements. During test time, we use the trained LSTM autoencoder to detect anomalous pathlines (i.e., those in the rip zone). The origination points of such anomalous pathlines, over the course of the video, are then presented as points within the rip zone. RipViz is fully automated and does not require user input. Feedback from domain expert suggests that RipViz has the potential for wider use.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a hybrid machine learning and flow analysis feature detection method, RipViz, to extract rip currents from stationary videos. Rip currents are dangerous strong currents that can drag beachgoers out to sea. Most people are either unaware of them or do not know what they look like. In some instances, even trained personnel such as lifeguards have difficulty identifying them. RipViz produces a simple, easy to understand visualization of rip location overlaid on the source video. With RipViz, we first obtain an unsteady 2D vector field from the stationary video using optical flow. Movement at each pixel is analyzed over time. At each seed point, sequences of short pathlines, rather a single long pathline, are traced across the frames of the video to better capture the quasi-periodic flow behavior of wave activity. Because of the motion on the beach, the surf zone, and the surrounding areas, these pathlines may still appear very cluttered and incomprehensible. Furthermore, lay audiences are not familiar with pathlines and may not know how to interpret them. To address this, we treat rip currents as a flow anomaly in an otherwise normal flow. To learn about the normal flow behavior, we train an LSTM autoencoder with pathline sequences from normal ocean, foreground, and background movements. During test time, we use the trained LSTM autoencoder to detect anomalous pathlines (i.e., those in the rip zone). The origination points of such anomalous pathlines, over the course of the video, are then presented as points within the rip zone. RipViz is fully automated and does not require user input. Feedback from domain expert suggests that RipViz has the potential for wider use.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a hybrid machine learning and flow analysis feature detection method, RipViz, to extract rip currents from stationary videos. Rip currents are dangerous strong currents that can drag beachgoers out to sea. Most people are either unaware of them or do not know what they look like. In some instances, even trained personnel such as lifeguards have difficulty identifying them. RipViz produces a simple, easy to understand visualization of rip location overlaid on the source video. With RipViz, we first obtain an unsteady 2D vector field from the stationary video using optical flow. Movement at each pixel is analyzed over time. At each seed point, sequences of short pathlines, rather a single long pathline, are traced across the frames of the video to better capture the quasi-periodic flow behavior of wave activity. Because of the motion on the beach, the surf zone, and the surrounding areas, these pathlines may still appear very cluttered and incomprehensible. Furthermore, lay audiences are not familiar with pathlines and may not know how to interpret them. To address this, we treat rip currents as a flow anomaly in an otherwise normal flow. To learn about the normal flow behavior, we train an LSTM autoencoder with pathline sequences from normal ocean, foreground, and background movements. During test time, we use the trained LSTM autoencoder to detect anomalous pathlines (i.e., those in the rip zone). The origination points of such anomalous pathlines, over the course of the video, are then presented as points within the rip zone. RipViz is fully automated and does not require user input. Feedback from domain expert suggests that RipViz has the potential for wider use.",
"title": "RipViz: Finding Rip Currents by Learning Pathline Behavior",
"normalizedTitle": "RipViz: Finding Rip Currents by Learning Pathline Behavior",
"fno": "10041994",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Videos",
"Behavioral Sciences",
"Visualization",
"Optical Flow",
"Noise Measurement",
"Bathymetry",
"Task Analysis",
"2 D Unsteady Flow Fields",
"Anomaly Detection",
"Flow Visualization",
"LSTM Autoencoders",
"Pathlines"
],
"authors": [
{
"givenName": "Akila de",
"surname": "Silva",
"fullName": "Akila de Silva",
"affiliation": "UC Santa Cruz",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mona",
"surname": "Zhao",
"fullName": "Mona Zhao",
"affiliation": "UC Santa Cruz",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Donald",
"surname": "Stewart",
"fullName": "Donald Stewart",
"affiliation": "UC Santa Cruz",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fahim",
"surname": "Hasan",
"fullName": "Fahim Hasan",
"affiliation": "UC Santa Cruz",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gregory",
"surname": "Dusek",
"fullName": "Gregory Dusek",
"affiliation": "NOAA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "James",
"surname": "Davis",
"fullName": "James Davis",
"affiliation": "UC Santa Cruz",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alex",
"surname": "Pang",
"fullName": "Alex Pang",
"affiliation": "UC Santa Cruz",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2010/4109/0/4109c270",
"title": "An Iterative Method for Superresolution of Optical Flow Derived by Energy Minimisation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109c270/12OmNApcuuo",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2018/2526/0/08368468",
"title": "Reblur2Deblur: Deblurring videos via self-supervised learning",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2018/08368468/12OmNBNM8YK",
"parentPublication": {
"id": "proceedings/iccp/2018/2526/0",
"title": "2018 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pvg/2001/7223/0/72230037",
"title": "Case Study: Visualizing Ocean Currents with Color and Dithering",
"doi": null,
"abstractUrl": "/proceedings-article/pvg/2001/72230037/12OmNqBbHBg",
"parentPublication": {
"id": "proceedings/pvg/2001/7223/0",
"title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/latw/2011/1489/0/05985908",
"title": "Behavioral-level thermal- and aging-estimation flow",
"doi": null,
"abstractUrl": "/proceedings-article/latw/2011/05985908/12OmNwHQB9Y",
"parentPublication": {
"id": "proceedings/latw/2011/1489/0",
"title": "2011 12th Latin American Test Workshop (LATW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1995/08/i0765",
"title": "Analysis of Camera Behavior During Tracking",
"doi": null,
"abstractUrl": "/journal/tp/1995/08/i0765/13rRUxYrbVy",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/02/v0113",
"title": "Accelerated Unsteady Flow Line Integral Convolution",
"doi": null,
"abstractUrl": "/journal/tg/2005/02/v0113/13rRUyuegh2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933578",
"title": "Unsteady Flow Visualization via Physics Based Pathline Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933578/1fTgILVAEIE",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150666",
"title": "Joint Learning of Blind Video Denoising and Optical Flow Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150666/1lPHlywV4xW",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaice/2020/9146/0/914600a292",
"title": "Indoor Privacy-preserving Action Recognition via Partially Coupled Convolutional Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/icaice/2020/914600a292/1rCg61MBavu",
"parentPublication": {
"id": "proceedings/icaice/2020/9146/0",
"title": "2020 International Conference on Artificial Intelligence and Computer Engineering (ICAICE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09626625",
"title": "Optical Flow in the Dark",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09626625/1yNcX71Hlxm",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10041718",
"articleId": "1KCLOHXFKWQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10041940",
"articleId": "1KEtpYenAVW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KJsiRmY8Lu",
"name": "ttg555501-010041994s1-supp1-3243834.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010041994s1-supp1-3243834.pdf",
"extension": "pdf",
"size": "5.32 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KCLOHXFKWQ",
"doi": "10.1109/TVCG.2023.3243668",
"abstract": "One of the fundamental problems in neurobiological research is to understand how neural circuits generate behaviors in response to sensory stimuli. Elucidating such neural circuits requires anatomical and functional information about the neurons that are active during the processing of the sensory information and generation of the respective response, as well as an identification of the connections between these neurons. With modern imaging techniques, both morphological properties of individual neurons as well as functional information related to sensory processing, information integration and behavior can be obtained. Given the resulting information, neurobiologists are faced with the task of identifying the anatomical structures down to individual neurons that are linked to the studied behavior and the processing of the respective sensory stimuli. Here, we present a novel interactive tool that assists neurobiologists in the aforementioned task by allowing them to extract hypothetical neural circuits constrained by anatomical and functional data. Our approach is based on two types of structural data: brain regions that are anatomically or functionally defined, and morphologies of individual neurons. Both types of structural data are interlinked and augmented with additional information. The presented tool allows the expert user to identify neurons using Boolean queries. The interactive formulation of these queries is supported by linked views, using, among other things, two novel 2D abstractions of neural circuits. The approach was validated in two case studies investigating the neural basis of vision-based behavioral responses in zebrafish larvae. Despite this particular application, we believe that the presented tool will be of general interest for exploring hypotheses about neural circuits in other species, genera and taxa.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One of the fundamental problems in neurobiological research is to understand how neural circuits generate behaviors in response to sensory stimuli. Elucidating such neural circuits requires anatomical and functional information about the neurons that are active during the processing of the sensory information and generation of the respective response, as well as an identification of the connections between these neurons. With modern imaging techniques, both morphological properties of individual neurons as well as functional information related to sensory processing, information integration and behavior can be obtained. Given the resulting information, neurobiologists are faced with the task of identifying the anatomical structures down to individual neurons that are linked to the studied behavior and the processing of the respective sensory stimuli. Here, we present a novel interactive tool that assists neurobiologists in the aforementioned task by allowing them to extract hypothetical neural circuits constrained by anatomical and functional data. Our approach is based on two types of structural data: brain regions that are anatomically or functionally defined, and morphologies of individual neurons. Both types of structural data are interlinked and augmented with additional information. The presented tool allows the expert user to identify neurons using Boolean queries. The interactive formulation of these queries is supported by linked views, using, among other things, two novel 2D abstractions of neural circuits. The approach was validated in two case studies investigating the neural basis of vision-based behavioral responses in zebrafish larvae. Despite this particular application, we believe that the presented tool will be of general interest for exploring hypotheses about neural circuits in other species, genera and taxa.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One of the fundamental problems in neurobiological research is to understand how neural circuits generate behaviors in response to sensory stimuli. Elucidating such neural circuits requires anatomical and functional information about the neurons that are active during the processing of the sensory information and generation of the respective response, as well as an identification of the connections between these neurons. With modern imaging techniques, both morphological properties of individual neurons as well as functional information related to sensory processing, information integration and behavior can be obtained. Given the resulting information, neurobiologists are faced with the task of identifying the anatomical structures down to individual neurons that are linked to the studied behavior and the processing of the respective sensory stimuli. Here, we present a novel interactive tool that assists neurobiologists in the aforementioned task by allowing them to extract hypothetical neural circuits constrained by anatomical and functional data. Our approach is based on two types of structural data: brain regions that are anatomically or functionally defined, and morphologies of individual neurons. Both types of structural data are interlinked and augmented with additional information. The presented tool allows the expert user to identify neurons using Boolean queries. The interactive formulation of these queries is supported by linked views, using, among other things, two novel 2D abstractions of neural circuits. The approach was validated in two case studies investigating the neural basis of vision-based behavioral responses in zebrafish larvae. Despite this particular application, we believe that the presented tool will be of general interest for exploring hypotheses about neural circuits in other species, genera and taxa.",
"title": "A Visual Interface for Exploring Hypotheses about Neural Circuits",
"normalizedTitle": "A Visual Interface for Exploring Hypotheses about Neural Circuits",
"fno": "10041718",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Neurons",
"Neural Circuits",
"Visualization",
"Imaging",
"Image Reconstruction",
"Behavioral Sciences",
"Data Visualization",
"Human Centered Computing",
"Visualization Systems And Tools",
"Web Based Interaction",
"Scientific Visualization"
],
"authors": [
{
"givenName": "Sumit K.",
"surname": "Vohra",
"fullName": "Sumit K. Vohra",
"affiliation": "Department of Visual and Data-Centric Computing, Zuse Institute Berlin (ZIB), Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Philipp",
"surname": "Harth",
"fullName": "Philipp Harth",
"affiliation": "Department of Visual and Data-Centric Computing, Zuse Institute Berlin (ZIB), Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yasuko",
"surname": "Isoe",
"fullName": "Yasuko Isoe",
"affiliation": "Department of Molecular and Cellular Biology, Harvard University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Armin",
"surname": "Bahl",
"fullName": "Armin Bahl",
"affiliation": "Centre for the Advanced Study of Collective Behaviour, University of Konstanz, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haleh",
"surname": "Fotowat",
"fullName": "Haleh Fotowat",
"affiliation": "Department of Molecular and Cellular Biology, Harvard University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Florian",
"surname": "Engert",
"fullName": "Florian Engert",
"affiliation": "Department of Molecular and Cellular Biology, Harvard University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hans-Christian",
"surname": "Hege",
"fullName": "Hans-Christian Hege",
"affiliation": "Department of Visual and Data-Centric Computing, Zuse Institute Berlin (ZIB), Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel",
"surname": "Baum",
"fullName": "Daniel Baum",
"affiliation": "Department of Visual and Data-Centric Computing, Zuse Institute Berlin (ZIB), Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bibe/2007/1509/0/04375644",
"title": "Inferring Behavioral-level Circuits of Caenorhabditis elegans from the Topology of its Wiring Diagram",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2007/04375644/12OmNCfjeC6",
"parentPublication": {
"id": "proceedings/bibe/2007/1509/0",
"title": "7th IEEE International Conference on Bioinformatics and Bioengineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/biovis/2012/4729/0/06378577",
"title": "Interactive extraction of neural structures with user-guided morphological diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/biovis/2012/06378577/12OmNvq5jva",
"parentPublication": {
"id": "proceedings/biovis/2012/4729/0",
"title": "2012 IEEE Symposium on Biological Data Visualization (BioVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/async/2012/4688/0/4688a025",
"title": "A Digital Neurosynaptic Core Using Event-Driven QDI Circuits",
"doi": null,
"abstractUrl": "/proceedings-article/async/2012/4688a025/12OmNx7G64k",
"parentPublication": {
"id": "proceedings/async/2012/4688/0",
"title": "2012 IEEE 18th International Symposium on Asynchronous Circuits and Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/biovis/2013/1658/0/06664349",
"title": "neuroMAP — Interactive graph-visualization of the fruit fly’s neural circuit",
"doi": null,
"abstractUrl": "/proceedings-article/biovis/2013/06664349/12OmNy2agJf",
"parentPublication": {
"id": "proceedings/biovis/2013/1658/0",
"title": "2013 IEEE Symposium on Biological Data Visualization (BioVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2011/935/0/05742370",
"title": "The Neuron Navigator: Exploring the information pathway through the neural maze",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2011/05742370/12OmNzxgHEL",
"parentPublication": {
"id": "proceedings/pacificvis/2011/935/0",
"title": "2011 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017617",
"title": "A Virtual Reality Visualization Tool for Neuron Tracing",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017617/13rRUwI5U2O",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017643",
"title": "Abstractocyte: A Visual Tool for Exploring Nanoscale Astroglial Cells",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017643/13rRUwI5U7Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000b111",
"title": "Deep Sparse Coding for Invariant Multimodal Halle Berry Neurons",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b111/17D45WYQJ9h",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpca/2022/2027/0/202700a633",
"title": "NeuroSync: A Scalable and Accurate Brain Simulator Using Safe and Efficient Speculation",
"doi": null,
"abstractUrl": "/proceedings-article/hpca/2022/202700a633/1Ds0hBf2cuc",
"parentPublication": {
"id": "proceedings/hpca/2022/2027/0",
"title": "2022 IEEE International Symposium on High-Performance Computer Architecture (HPCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2022/9221/0/922100a338",
"title": "Causality-Based Neural Network Repair",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2022/922100a338/1EmsnTTm9lC",
"parentPublication": {
"id": "proceedings/icse/2022/9221/0",
"title": "2022 IEEE/ACM 44th International Conference on Software Engineering (ICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10038574",
"articleId": "1KxPXyC69b2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10041726",
"articleId": "1KCLOxAKjkI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KEtpK1AijK",
"name": "ttg555501-010041718s1-supp1-3243668.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010041718s1-supp1-3243668.pdf",
"extension": "pdf",
"size": "767 kB",
"__typename": "WebExtraType"
},
{
"id": "1KEtpuzA6k0",
"name": "ttg555501-010041718s1-supp2-3243668.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010041718s1-supp2-3243668.mp4",
"extension": "mp4",
"size": "141 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KCLOxAKjkI",
"doi": "10.1109/TVCG.2023.3243228",
"abstract": "Diagnosing the cluster-based performance of large-scale deep neural network (DNN) models during training is essential for improving training efficiency and reducing resource consumption. However, it remains challenging due to the incomprehensibility of the parallelization strategy and the sheer volume of complex data generated in the training processes. Prior works visually analyze performance profiles and timeline traces to identify anomalies from the perspective of individual devices in the cluster, which is not amenable for studying the root cause of anomalies. In this paper, we present a visual analytics approach that empowers analysts to visually explore the parallel training process of a DNN model and interactively diagnose the root cause of a performance issue. A set of design requirements is gathered through discussions with domain experts. We propose an enhanced execution flow of model operators for illustrating parallelization strategies within the computational graph layout. We design and implement an enhanced Marey's graph representation, which introduces the concept of time-span and a banded visual metaphor to convey training dynamics and help experts identify inefficient training processes. We also propose a visual aggregation technique to improve visualization efficiency. We evaluate our approach using case studies, a user study and expert interviews on two large-scale models run in a cluster, namely, the PanGu-<inline-formula><tex-math notation=\"LaTeX\">Z_$\\alpha$_Z</tex-math></inline-formula> 13B model (40 layers), and the Resnet model (50 layers).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Diagnosing the cluster-based performance of large-scale deep neural network (DNN) models during training is essential for improving training efficiency and reducing resource consumption. However, it remains challenging due to the incomprehensibility of the parallelization strategy and the sheer volume of complex data generated in the training processes. Prior works visually analyze performance profiles and timeline traces to identify anomalies from the perspective of individual devices in the cluster, which is not amenable for studying the root cause of anomalies. In this paper, we present a visual analytics approach that empowers analysts to visually explore the parallel training process of a DNN model and interactively diagnose the root cause of a performance issue. A set of design requirements is gathered through discussions with domain experts. We propose an enhanced execution flow of model operators for illustrating parallelization strategies within the computational graph layout. We design and implement an enhanced Marey's graph representation, which introduces the concept of time-span and a banded visual metaphor to convey training dynamics and help experts identify inefficient training processes. We also propose a visual aggregation technique to improve visualization efficiency. We evaluate our approach using case studies, a user study and expert interviews on two large-scale models run in a cluster, namely, the PanGu-<inline-formula><tex-math notation=\"LaTeX\">$\\alpha$</tex-math></inline-formula> 13B model (40 layers), and the Resnet model (50 layers).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Diagnosing the cluster-based performance of large-scale deep neural network (DNN) models during training is essential for improving training efficiency and reducing resource consumption. However, it remains challenging due to the incomprehensibility of the parallelization strategy and the sheer volume of complex data generated in the training processes. Prior works visually analyze performance profiles and timeline traces to identify anomalies from the perspective of individual devices in the cluster, which is not amenable for studying the root cause of anomalies. In this paper, we present a visual analytics approach that empowers analysts to visually explore the parallel training process of a DNN model and interactively diagnose the root cause of a performance issue. A set of design requirements is gathered through discussions with domain experts. We propose an enhanced execution flow of model operators for illustrating parallelization strategies within the computational graph layout. We design and implement an enhanced Marey's graph representation, which introduces the concept of time-span and a banded visual metaphor to convey training dynamics and help experts identify inefficient training processes. We also propose a visual aggregation technique to improve visualization efficiency. We evaluate our approach using case studies, a user study and expert interviews on two large-scale models run in a cluster, namely, the PanGu-- 13B model (40 layers), and the Resnet model (50 layers).",
"title": "Visual Diagnostics of Parallel Performance in Training Large-Scale DNN Models",
"normalizedTitle": "Visual Diagnostics of Parallel Performance in Training Large-Scale DNN Models",
"fno": "10041726",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Training",
"Data Visualization",
"Computational Modeling",
"Solid Modeling",
"Parallel Processing",
"Performance Evaluation",
"Data Models",
"Deep Neural Network",
"Model Training",
"Parallel Performance",
"Visual Analysis"
],
"authors": [
{
"givenName": "Yating",
"surname": "Wei",
"fullName": "Yating Wei",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhiyong",
"surname": "Wang",
"fullName": "Zhiyong Wang",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhongwei",
"surname": "Wang",
"fullName": "Zhongwei Wang",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong",
"surname": "Dai",
"fullName": "Yong Dai",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gongchang",
"surname": "Ou",
"fullName": "Gongchang Ou",
"affiliation": "Distributed Data Lab, Huawei Technologies Co., Ltd., Shenzhen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Han",
"surname": "Gao",
"fullName": "Han Gao",
"affiliation": "Distributed Data Lab, Huawei Technologies Co., Ltd., Shenzhen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haitao",
"surname": "Yang",
"fullName": "Haitao Yang",
"affiliation": "Distributed Data Lab, Huawei Technologies Co., Ltd., Shenzhen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yue",
"surname": "Wang",
"fullName": "Yue Wang",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Caleb Chen",
"surname": "Cao",
"fullName": "Caleb Chen Cao",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Luoxuan",
"surname": "Weng",
"fullName": "Luoxuan Weng",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiaying",
"surname": "Lu",
"fullName": "Jiaying Lu",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rongchen",
"surname": "Zhu",
"fullName": "Rongchen Zhu",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Chen",
"fullName": "Wei Chen",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/sc/2018/8384/0/838400a807",
"title": "Exploring Flexible Communications for Streamlining DNN Ensemble Training Pipelines",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2018/838400a807/17D45XERmlD",
"parentPublication": {
"id": "proceedings/sc/2018/8384/0",
"title": "2018 SC18: The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2023/04/10040900",
"title": "Expediting Distributed DNN Training With Device Topology-Aware Graph Deployment",
"doi": null,
"abstractUrl": "/journal/td/2023/04/10040900/1KB9yUMFXdS",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2022/7315/0/731500a161",
"title": "Optimizing Resource Allocation in Pipeline Parallelism for Distributed DNN Training",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2022/731500a161/1LRlY0t8Q8M",
"parentPublication": {
"id": "proceedings/icpads/2023/7315/0",
"title": "2022 IEEE 28th International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0/199300b342",
"title": "A Layer-Based Sparsification Method For Distributed DNN Training",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2022/199300b342/1LSPHvtS2ac",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0",
"title": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hipc/2022/9423/0/942300a032",
"title": "AccDP: Accelerated Data-Parallel Distributed DNN Training for Modern GPU-Based HPC Clusters",
"doi": null,
"abstractUrl": "/proceedings-article/hipc/2022/942300a032/1MEXggdyM48",
"parentPublication": {
"id": "proceedings/hipc/2022/9423/0",
"title": "2022 IEEE 29th International Conference on High Performance Computing, Data, and Analytics (HiPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2022/02/08922719",
"title": "GradientFlow: Optimizing Network Performance for Large-Scale Distributed DNN Training",
"doi": null,
"abstractUrl": "/journal/bd/2022/02/08922719/1fvZcmU7LtS",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartcloud/2020/6547/0/654700a165",
"title": "A Dynamic Scaling Scheme of Cloud-based DNN Training Clusters",
"doi": null,
"abstractUrl": "/proceedings-article/smartcloud/2020/654700a165/1p6f3KZOT9S",
"parentPublication": {
"id": "proceedings/smartcloud/2020/6547/0",
"title": "2020 IEEE International Conference on Smart Cloud (SmartCloud)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2022/02/09336272",
"title": "λ<italic>DNN</italic>: Achieving Predictable Distributed DNN Training With Serverless Architectures",
"doi": null,
"abstractUrl": "/journal/tc/2022/02/09336272/1qHMWukjg1q",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "letters/ca/2021/01/09436036",
"title": "Deep Partitioned Training From Near-Storage Computing to DNN Accelerators",
"doi": null,
"abstractUrl": "/journal/ca/2021/01/09436036/1tJs7Og8cr6",
"parentPublication": {
"id": "letters/ca",
"title": "IEEE Computer Architecture Letters",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2021/4066/0/406600b025",
"title": "Pase: Parallelization Strategies for Efficient DNN Training",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2021/406600b025/1uOwbKccA0g",
"parentPublication": {
"id": "proceedings/ipdps/2021/4066/0",
"title": "2021 IEEE International Parallel and Distributed Processing Symposium (IPDPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10038574",
"articleId": "1KxPXyC69b2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10041718",
"articleId": "1KCLOHXFKWQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KxPXyC69b2",
"doi": "10.1109/TVCG.2023.3242293",
"abstract": "We propose Unified Model of Saliency and Scanpaths (UMSS)-a model that learns to predict multi-duration saliency and scanpaths (i.e. sequences of eye fixations) on information visualisations. Although scanpaths provide rich information about the importance of different visualisation elements during the visual exploration process, prior work has been limited to predicting aggregated attention statistics, such as visual saliency. We present in-depth analyses of gaze behaviour for different information visualisation elements (e.g. Title, Label, Data) on the popular MASSVIS dataset. We show that while, overall, gaze patterns are surprisingly consistent across visualisations and viewers, there are also structural differences in gaze dynamics for different elements. Informed by our analyses, UMSS first predicts multi-duration element-level saliency maps, then probabilistically samples scanpaths from them. Extensive experiments on MASSVIS show that our method consistently outperforms state-of-the-art methods with respect to several, widely used scanpath and saliency evaluation metrics. Our method achieves a relative improvement in sequence score of 11.5% for scanpath prediction, and a relative improvement in Pearson correlation coefficient of up to 23.6 These results are auspicious and point towards richer user models and simulations of visual attention on visualisations without the need for any eye tracking equipment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose Unified Model of Saliency and Scanpaths (UMSS)-a model that learns to predict multi-duration saliency and scanpaths (i.e. sequences of eye fixations) on information visualisations. Although scanpaths provide rich information about the importance of different visualisation elements during the visual exploration process, prior work has been limited to predicting aggregated attention statistics, such as visual saliency. We present in-depth analyses of gaze behaviour for different information visualisation elements (e.g. Title, Label, Data) on the popular MASSVIS dataset. We show that while, overall, gaze patterns are surprisingly consistent across visualisations and viewers, there are also structural differences in gaze dynamics for different elements. Informed by our analyses, UMSS first predicts multi-duration element-level saliency maps, then probabilistically samples scanpaths from them. Extensive experiments on MASSVIS show that our method consistently outperforms state-of-the-art methods with respect to several, widely used scanpath and saliency evaluation metrics. Our method achieves a relative improvement in sequence score of 11.5% for scanpath prediction, and a relative improvement in Pearson correlation coefficient of up to 23.6 These results are auspicious and point towards richer user models and simulations of visual attention on visualisations without the need for any eye tracking equipment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose Unified Model of Saliency and Scanpaths (UMSS)-a model that learns to predict multi-duration saliency and scanpaths (i.e. sequences of eye fixations) on information visualisations. Although scanpaths provide rich information about the importance of different visualisation elements during the visual exploration process, prior work has been limited to predicting aggregated attention statistics, such as visual saliency. We present in-depth analyses of gaze behaviour for different information visualisation elements (e.g. Title, Label, Data) on the popular MASSVIS dataset. We show that while, overall, gaze patterns are surprisingly consistent across visualisations and viewers, there are also structural differences in gaze dynamics for different elements. Informed by our analyses, UMSS first predicts multi-duration element-level saliency maps, then probabilistically samples scanpaths from them. Extensive experiments on MASSVIS show that our method consistently outperforms state-of-the-art methods with respect to several, widely used scanpath and saliency evaluation metrics. Our method achieves a relative improvement in sequence score of 11.5% for scanpath prediction, and a relative improvement in Pearson correlation coefficient of up to 23.6 These results are auspicious and point towards richer user models and simulations of visual attention on visualisations without the need for any eye tracking equipment.",
"title": "Scanpath Prediction on Information Visualisations",
"normalizedTitle": "Scanpath Prediction on Information Visualisations",
"fno": "10038574",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Data Visualization",
"Predictive Models",
"Computational Modeling",
"Hidden Markov Models",
"Gaze Tracking",
"Task Analysis",
"Scanpath Prediction",
"Visual Saliency",
"Visual Attention",
"MASSVIS",
"Gaze Behaviour Analysis"
],
"authors": [
{
"givenName": "Yao",
"surname": "Wang",
"fullName": "Yao Wang",
"affiliation": "Institute for Visualisation and Interactive Systems, University of Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mihai Bâ",
"surname": "ce",
"fullName": "Mihai Bâ ce",
"affiliation": "Institute for Visualisation and Interactive Systems, University of Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andreas",
"surname": "Bulling",
"fullName": "Andreas Bulling",
"affiliation": "Institute for Visualisation and Interactive Systems, University of Stuttgart, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/etvis/2016/4731/0/07851165",
"title": "Visualizing eye tracking data with gaze-guided slit-scans",
"doi": null,
"abstractUrl": "/proceedings-article/etvis/2016/07851165/12OmNB8kHPk",
"parentPublication": {
"id": "proceedings/etvis/2016/4731/0",
"title": "2016 IEEE Second Workshop on Eye Tracking and Visualization (ETVIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019507",
"title": "Scanpath mining of eye movement trajectories for visual attention analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019507/12OmNs5rkUx",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f333",
"title": "Gaze Prediction in Dynamic 360° Immersive Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f333/17D45VW8brT",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714046",
"title": "ScanGAN360: A Generative Model of Realistic Scanpaths for 360° Images",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714046/1B0Y1GfEIQ8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09802919",
"title": "Continuous Gaze Tracking With Implicit Saliency-Aware Calibration on Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09802919/1Eo1vvDggH6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900f006",
"title": "ScanpathNet: A Recurrent Mixture Density Network for Scanpath Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900f006/1G574odZPjO",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a633",
"title": "Classifying Autism Spectrum Disorder Based on Scanpaths and Saliency",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a633/1cJ0zzhughy",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/06/08918321",
"title": "Visual Scanpath Prediction Using IOR-ROI Recurrent Mixture Density Network",
"doi": null,
"abstractUrl": "/journal/tp/2021/06/08918321/1gKtH4OLgdO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/12/09117053",
"title": "Evaluation of Saccadic Scanpath Prediction: Subjective Assessment Database and Recurrent Neural Network Based Metric",
"doi": null,
"abstractUrl": "/journal/tp/2021/12/09117053/1kGfFYImQVi",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900k0871",
"title": "Predicting Human Scanpaths in Visual Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900k0871/1yeHNTgYWNW",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10036089",
"articleId": "1KsSEkjUN1e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10041726",
"articleId": "1KCLOxAKjkI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KzA3TgcGo8",
"name": "ttg555501-010038574s1-supp1-3242293.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010038574s1-supp1-3242293.pdf",
"extension": "pdf",
"size": "7.43 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KsSEkjUN1e",
"doi": "10.1109/TVCG.2023.3241894",
"abstract": "In this work, we propose a stroke-based hairstyle editing network, dubbed HairstyleNet, allowing users to conveniently change the hairstyles of an image in an interactive fashion. Different from previous works, we simplify the hairstyle editing process where users can manipulate local or entire hairstyles by adjusting the parameterized hair regions. Our HairstyleNet consists of two stages: a stroke parameterization stage and a stroke-to-hair generation stage. In the stroke parameterization stage, we firstly introduce parametric strokes to approximate the hair wisps, where the stroke shape is controlled by a quadratic Bézier curve and a thickness parameter. Since rendering strokes with thickness to an image is not differentiable, we opt to leverage a neural renderer to construct the mapping from stroke parameters to a stroke image. Thus, the stroke parameters can be directly estimated from hair regions in a differentiable way, enabling us to flexibly edit the hairstyles of input images. In the stroke-to-hair generation stage, we design a hairstyle refinement network that first encodes coarsely composed images of hair strokes, face, and background into latent representations and then generates high-fidelity face images with desirable new hairstyles from the latent codes. Extensive experiments demonstrate that our HairstyleNet achieves state-of-the-art performance and allows flexible hairstyle manipulation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we propose a stroke-based hairstyle editing network, dubbed HairstyleNet, allowing users to conveniently change the hairstyles of an image in an interactive fashion. Different from previous works, we simplify the hairstyle editing process where users can manipulate local or entire hairstyles by adjusting the parameterized hair regions. Our HairstyleNet consists of two stages: a stroke parameterization stage and a stroke-to-hair generation stage. In the stroke parameterization stage, we firstly introduce parametric strokes to approximate the hair wisps, where the stroke shape is controlled by a quadratic Bézier curve and a thickness parameter. Since rendering strokes with thickness to an image is not differentiable, we opt to leverage a neural renderer to construct the mapping from stroke parameters to a stroke image. Thus, the stroke parameters can be directly estimated from hair regions in a differentiable way, enabling us to flexibly edit the hairstyles of input images. In the stroke-to-hair generation stage, we design a hairstyle refinement network that first encodes coarsely composed images of hair strokes, face, and background into latent representations and then generates high-fidelity face images with desirable new hairstyles from the latent codes. Extensive experiments demonstrate that our HairstyleNet achieves state-of-the-art performance and allows flexible hairstyle manipulation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we propose a stroke-based hairstyle editing network, dubbed HairstyleNet, allowing users to conveniently change the hairstyles of an image in an interactive fashion. Different from previous works, we simplify the hairstyle editing process where users can manipulate local or entire hairstyles by adjusting the parameterized hair regions. Our HairstyleNet consists of two stages: a stroke parameterization stage and a stroke-to-hair generation stage. In the stroke parameterization stage, we firstly introduce parametric strokes to approximate the hair wisps, where the stroke shape is controlled by a quadratic Bézier curve and a thickness parameter. Since rendering strokes with thickness to an image is not differentiable, we opt to leverage a neural renderer to construct the mapping from stroke parameters to a stroke image. Thus, the stroke parameters can be directly estimated from hair regions in a differentiable way, enabling us to flexibly edit the hairstyles of input images. In the stroke-to-hair generation stage, we design a hairstyle refinement network that first encodes coarsely composed images of hair strokes, face, and background into latent representations and then generates high-fidelity face images with desirable new hairstyles from the latent codes. Extensive experiments demonstrate that our HairstyleNet achieves state-of-the-art performance and allows flexible hairstyle manipulation.",
"title": "HairStyle Editing via Parametric Controllable Strokes",
"normalizedTitle": "HairStyle Editing via Parametric Controllable Strokes",
"fno": "10036089",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Hair",
"Image Color Analysis",
"Shape",
"Stroke Medical Condition",
"Rendering Computer Graphics",
"Faces",
"Pipelines",
"Stroke Controllable",
"Parameterized Hair Strokes",
"Hairstyle Editing",
"Hairstyle Transfer"
],
"authors": [
{
"givenName": "Xinhui",
"surname": "Song",
"fullName": "Xinhui Song",
"affiliation": "Fuxi AI Lab of Netease, Inc., HangZhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chen",
"surname": "Liu",
"fullName": "Chen Liu",
"affiliation": "University of Queensland, Brisbane, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Youyi",
"surname": "Zheng",
"fullName": "Youyi Zheng",
"affiliation": "Zhejiang University, HangZhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zunlei",
"surname": "Feng",
"fullName": "Zunlei Feng",
"affiliation": "Zhejiang University, HangZhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lincheng",
"surname": "Li",
"fullName": "Lincheng Li",
"affiliation": "Fuxi AI Lab of Netease, Inc., HangZhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kun",
"surname": "Zhou",
"fullName": "Kun Zhou",
"affiliation": "Zhejiang University, HangZhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xin",
"surname": "Yu",
"fullName": "Xin Yu",
"affiliation": "University of Queensland, Brisbane, Australia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cgi/2004/2171/0/21710608",
"title": "Sketch Interface Based Expressive Hairstyle Modelling and Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2004/21710608/12OmNweBUN2",
"parentPublication": {
"id": "proceedings/cgi/2004/2171/0",
"title": "Proceedings. Computer Graphics International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a214",
"title": "NPR Hair Modeling with Parametric Clumps",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a214/12OmNxuo0jO",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmi/2002/1834/0/18340535",
"title": "An Improved Algorithm for Hairstyle Dynamics",
"doi": null,
"abstractUrl": "/proceedings-article/icmi/2002/18340535/12OmNxwWoUQ",
"parentPublication": {
"id": "proceedings/icmi/2002/1834/0",
"title": "Proceedings Fourth IEEE International Conference on Multimodal Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/02/v0131",
"title": "Real-Time Animation of Complex Hairstyles",
"doi": null,
"abstractUrl": "/journal/tg/2006/02/v0131/13rRUxZzAhw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2021/3176/0/09667038",
"title": "Hairstyle Transfer between Face Images",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09667038/1A6BEOBJyq4",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2018/8497/0/849700a008",
"title": "Data-Driven Hair Modeling from a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2018/849700a008/1a3x7jLsFPi",
"parentPublication": {
"id": "proceedings/icvrv/2018/8497/0",
"title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/07/08964443",
"title": "DeepSketchHair: Deep Sketch-Based 3D Hair Modeling",
"doi": null,
"abstractUrl": "/journal/tg/2021/07/08964443/1gLZSnCp3Ko",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2019/4752/0/09212824",
"title": "Automatic Hair Modeling from One Image",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2019/09212824/1nHRUrDMgE0",
"parentPublication": {
"id": "proceedings/icvrv/2019/4752/0",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/04/09220808",
"title": "Real-Time Hair Simulation With Neural Interpolation",
"doi": null,
"abstractUrl": "/journal/tg/2022/04/09220808/1nRLElyFvfG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900b984",
"title": "LOHO: Latent Optimization of Hairstyles via Orthogonalization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900b984/1yeIuaT2Ife",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10034850",
"articleId": "1KpxdJPurhm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10038574",
"articleId": "1KxPXyC69b2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KzA4BeBQJO",
"name": "ttg555501-010036089s1-supp1-3241894.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010036089s1-supp1-3241894.mp4",
"extension": "mp4",
"size": "1.59 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KpxdALb4By",
"doi": "10.1109/TVCG.2023.3241581",
"abstract": "Analyzing user behavior from usability evaluation can be a challenging and time-consuming task, especially as the number of participants and the scale and complexity of the evaluation grows. We propose UXSENSE, a visual analytics system using machine learning methods to extract user behavior from audio and video recordings as parallel time-stamped data streams. Our implementation draws on pattern recognition, computer vision, natural language processing, and machine learning to extract user sentiment, actions, posture, spoken words, and other features from such recordings. These streams are visualized as parallel timelines in a web-based front-end, enabling the researcher to search, filter, and annotate data across time and space. We present the results of a user study involving professional UX researchers evaluating user data using uxSense. In fact, we used uxSense itself to evaluate their sessions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Analyzing user behavior from usability evaluation can be a challenging and time-consuming task, especially as the number of participants and the scale and complexity of the evaluation grows. We propose UXSENSE, a visual analytics system using machine learning methods to extract user behavior from audio and video recordings as parallel time-stamped data streams. Our implementation draws on pattern recognition, computer vision, natural language processing, and machine learning to extract user sentiment, actions, posture, spoken words, and other features from such recordings. These streams are visualized as parallel timelines in a web-based front-end, enabling the researcher to search, filter, and annotate data across time and space. We present the results of a user study involving professional UX researchers evaluating user data using uxSense. In fact, we used uxSense itself to evaluate their sessions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Analyzing user behavior from usability evaluation can be a challenging and time-consuming task, especially as the number of participants and the scale and complexity of the evaluation grows. We propose UXSENSE, a visual analytics system using machine learning methods to extract user behavior from audio and video recordings as parallel time-stamped data streams. Our implementation draws on pattern recognition, computer vision, natural language processing, and machine learning to extract user sentiment, actions, posture, spoken words, and other features from such recordings. These streams are visualized as parallel timelines in a web-based front-end, enabling the researcher to search, filter, and annotate data across time and space. We present the results of a user study involving professional UX researchers evaluating user data using uxSense. In fact, we used uxSense itself to evaluate their sessions.",
"title": "uxSense: Supporting User Experience Analysis with Visualization and Computer Vision",
"normalizedTitle": "uxSense: Supporting User Experience Analysis with Visualization and Computer Vision",
"fno": "10034833",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Annotations",
"Data Visualization",
"Feature Extraction",
"Usability",
"Visual Analytics",
"Measurement",
"Gaze Tracking",
"Visualization",
"Visual Analytics",
"Evaluation",
"Video Analytics",
"Machine Learning",
"Deep Learning",
"Computer Vision"
],
"authors": [
{
"givenName": "Andrea",
"surname": "Batch",
"fullName": "Andrea Batch",
"affiliation": "University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yipeng",
"surname": "Ji",
"fullName": "Yipeng Ji",
"affiliation": "University of Waterloo, ON, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mingming",
"surname": "Fan",
"fullName": "Mingming Fan",
"affiliation": "Hong Kong University of Science and Technology (Guangzhou) and Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Zhao",
"fullName": "Jian Zhao",
"affiliation": "University of Waterloo, ON, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Niklas",
"surname": "Elmqvist",
"fullName": "Niklas Elmqvist",
"affiliation": "University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/agile/2012/4804/0/4804a001",
"title": "User Experience Design Goes Agile in Lean Transformation -- A Case Study",
"doi": null,
"abstractUrl": "/proceedings-article/agile/2012/4804a001/12OmNAtst5L",
"parentPublication": {
"id": "proceedings/agile/2012/4804/0",
"title": "2012 Agile Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2013/2144/1/2144a535",
"title": "Designing User Experience for Mobile Apps: Long-Term Product Owner Perspective",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2013/2144a535/12OmNB7LvCm",
"parentPublication": {
"id": "proceedings/apsec/2013/2144/1",
"title": "2013 20th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sose/2013/4944/0/4944a298",
"title": "Cloud Services for Improved User Experience in Sharing Mobile Videos",
"doi": null,
"abstractUrl": "/proceedings-article/sose/2013/4944a298/12OmNBt3qpi",
"parentPublication": {
"id": "proceedings/sose/2013/4944/0",
"title": "2013 IEEE Seventh International Symposium on Service-Oriented System Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/agile/2009/3768/0/3768a011",
"title": "The Importance of Identity and Vision to User Experience Designers on Agile Projects",
"doi": null,
"abstractUrl": "/proceedings-article/agile/2009/3768a011/12OmNx8Ouvv",
"parentPublication": {
"id": "proceedings/agile/2009/3768/0",
"title": "AGILE Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/models/2017/3492/0/3492a229",
"title": "User Experience for Model-Driven Engineering: Challenges and Future Directions",
"doi": null,
"abstractUrl": "/proceedings-article/models/2017/3492a229/12OmNxYtu2w",
"parentPublication": {
"id": "proceedings/models/2017/3492/0",
"title": "2017 ACM/IEEE 20th International Conference on Model Driven Engineering Languages and Systems (MODELS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2017/6664/0/08279761",
"title": "From usability to user experience",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2017/08279761/12OmNzUPpHN",
"parentPublication": {
"id": "proceedings/iciibms/2017/6664/0",
"title": "2017 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aswec/2015/9390/0/9390a175",
"title": "The Next Level of User Experience of Cloud Storage Services: Supporting Collaboration with Social Features",
"doi": null,
"abstractUrl": "/proceedings-article/aswec/2015/9390a175/12OmNzmtWIv",
"parentPublication": {
"id": "proceedings/aswec/2015/9390/0",
"title": "2015 24th Australasian Software Engineering Conference (ASWEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807301",
"title": "<bold>VisTA</bold>: Integrating Machine Intelligence with <bold>Vis</bold>ualization to Support the Investigation of <bold>T</bold>hink-<bold>A</bold>loud Sessions",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807301/1cG6uY7sFEs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798144",
"title": "Early Virtual Reality User Experience and Usability Assessment of a Surgical Shape Memory Alloy Aspiration/Irrigation Instrument",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798144/1cJ0OKIZ8yc",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a175",
"title": "UXmood - A Tool to Investigate the User Experience (UX) Based on Multimodal Sentiment Analysis and Information Visualization (InfoVis)",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a175/1cMFbUzGfja",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10029921",
"articleId": "1KmyX4gJuMg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10034850",
"articleId": "1KpxdJPurhm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KpxdJPurhm",
"doi": "10.1109/TVCG.2023.3241596",
"abstract": "Distributed tracing tools have emerged in recent years to enable operators of modern internet applications to troubleshoot cross-component problems in deployed applications. Due to the rich, detailed diagnostic data captured by distributed tracing tools, effectively presenting this data is important. However, use of visualisation to enable sensemaking of this complex data in distributed tracing tools has received relatively little attention. Consequently, operators struggle to make effective use of existing tools. In this paper we present the first characterisation of distributed tracing visualisation through a qualitative interview study with six practitioners from two large internet companies. Across two rounds of 1-on-1 interviews we use grounded theory coding to establish users, extract concrete use cases and identify shortcomings of existing distributed tracing tools. We derive guidelines for development of future distributed tracing tools and expose several open research problems that have wide reaching implications for visualisation research and other domains.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Distributed tracing tools have emerged in recent years to enable operators of modern internet applications to troubleshoot cross-component problems in deployed applications. Due to the rich, detailed diagnostic data captured by distributed tracing tools, effectively presenting this data is important. However, use of visualisation to enable sensemaking of this complex data in distributed tracing tools has received relatively little attention. Consequently, operators struggle to make effective use of existing tools. In this paper we present the first characterisation of distributed tracing visualisation through a qualitative interview study with six practitioners from two large internet companies. Across two rounds of 1-on-1 interviews we use grounded theory coding to establish users, extract concrete use cases and identify shortcomings of existing distributed tracing tools. We derive guidelines for development of future distributed tracing tools and expose several open research problems that have wide reaching implications for visualisation research and other domains.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Distributed tracing tools have emerged in recent years to enable operators of modern internet applications to troubleshoot cross-component problems in deployed applications. Due to the rich, detailed diagnostic data captured by distributed tracing tools, effectively presenting this data is important. However, use of visualisation to enable sensemaking of this complex data in distributed tracing tools has received relatively little attention. Consequently, operators struggle to make effective use of existing tools. In this paper we present the first characterisation of distributed tracing visualisation through a qualitative interview study with six practitioners from two large internet companies. Across two rounds of 1-on-1 interviews we use grounded theory coding to establish users, extract concrete use cases and identify shortcomings of existing distributed tracing tools. We derive guidelines for development of future distributed tracing tools and expose several open research problems that have wide reaching implications for visualisation research and other domains.",
"title": "A Qualitative Interview Study of Distributed Tracing Visualisation: A Characterisation of Challenges and Opportunities",
"normalizedTitle": "A Qualitative Interview Study of Distributed Tracing Visualisation: A Characterisation of Challenges and Opportunities",
"fno": "10034850",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Interviews",
"Distributed Databases",
"Task Analysis",
"Social Networking Online",
"Microservice Architectures",
"Guidelines",
"Visualisation",
"Distributed Tracing",
"Systems"
],
"authors": [
{
"givenName": "Thomas",
"surname": "Davidson",
"fullName": "Thomas Davidson",
"affiliation": "Max Planck Institute for Software Systems, University of Saarland, Saarbrücken, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Emily",
"surname": "Wall",
"fullName": "Emily Wall",
"affiliation": "Emory University, Atlanta, GA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jonathan",
"surname": "Mace",
"fullName": "Jonathan Mace",
"affiliation": "Max Planck Institute for Software Systems, University of Saarland, Saarbrücken, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/mascots/2009/4927/0/05366158",
"title": "Efficient tracing and performance analysis for large distributed systems",
"doi": null,
"abstractUrl": "/proceedings-article/mascots/2009/05366158/12OmNzEVS0v",
"parentPublication": {
"id": "proceedings/mascots/2009/4927/0",
"title": "2009 IEEE International Symposium on Modeling, Analysis & Simulation of Computer and Telecommunication Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2019/01/08611462",
"title": "Ben Sigelman on Distributed Tracing [Software Engineering Radio]",
"doi": null,
"abstractUrl": "/magazine/so/2019/01/08611462/17D45WB0qb7",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ast/2022/9286/0/928600a001",
"title": "Challenges in Regression Test Selection for End-to-End Testing of Microservice-based Software Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ast/2022/928600a001/1Ehs72Xh05y",
"parentPublication": {
"id": "proceedings/ast/2022/9286/0",
"title": "2022 IEEE/ACM International Conference on Automation of Software Test (AST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-seip/2022/9590/0/959000a075",
"title": "Automatic Anti-Pattern Detection in Microservice Architectures Based on Distributed Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/icse-seip/2022/959000a075/1Ehsdmm8b96",
"parentPublication": {
"id": "proceedings/icse-seip/2022/9590/0",
"title": "2022 IEEE/ACM 44th International Conference on Software Engineering: Software Engineering in Practice (ICSE-SEIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cloud/2022/8137/0/813700a179",
"title": "Distributed online extraction of a fluid model for microservice applications using local tracing data",
"doi": null,
"abstractUrl": "/proceedings-article/cloud/2022/813700a179/1G6l8C1W1Nu",
"parentPublication": {
"id": "proceedings/cloud/2022/8137/0",
"title": "2022 IEEE 15th International Conference on Cloud Computing (CLOUD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cloud/2022/8137/0/813700a489",
"title": "Localizing and Explaining Faults in Microservices Using Distributed Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/cloud/2022/813700a489/1G6l9NBJvck",
"parentPublication": {
"id": "proceedings/cloud/2022/8137/0",
"title": "2022 IEEE 15th International Conference on Cloud Computing (CLOUD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/sc/2022/01/08826375",
"title": "Microservices Monitoring with Event Logs and Black Box Execution Tracing",
"doi": null,
"abstractUrl": "/journal/sc/2022/01/08826375/1d6xyVdVCXS",
"parentPublication": {
"id": "trans/sc",
"title": "IEEE Transactions on Services Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cloudcom/2019/5011/0/501100a309",
"title": "Visualisation of Distributed Systems Simulation Made Simple",
"doi": null,
"abstractUrl": "/proceedings-article/cloudcom/2019/501100a309/1h0KuUtyABW",
"parentPublication": {
"id": "proceedings/cloudcom/2019/5011/0",
"title": "2019 IEEE International Conference on Cloud Computing Technology and Science (CloudCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-seis/2020/7125/0/712500a071",
"title": "Debugging Hiring: What Went Right and What Went Wrong in the Technical Interview Process",
"doi": null,
"abstractUrl": "/proceedings-article/icse-seis/2020/712500a071/1pmQasGSAes",
"parentPublication": {
"id": "proceedings/icse-seis/2020/7125/0",
"title": "2020 IEEE/ACM 42nd International Conference on Software Engineering: Software Engineering in Society (ICSE-SEIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/edocw/2021/4488/0/448800a308",
"title": "Offline Trace Generation for Microservice Observability",
"doi": null,
"abstractUrl": "/proceedings-article/edocw/2021/448800a308/1yZ5zgSByZW",
"parentPublication": {
"id": "proceedings/edocw/2021/4488/0",
"title": "2021 IEEE 25th International Enterprise Distributed Object Computing Workshop (EDOCW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10034833",
"articleId": "1KpxdALb4By",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10036089",
"articleId": "1KsSEkjUN1e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Krc7y256ik",
"name": "ttg555501-010034850s1-tvcg-3241596-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010034850s1-tvcg-3241596-mm.zip",
"extension": "zip",
"size": "539 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KmyX4gJuMg",
"doi": "10.1109/TVCG.2023.3240356",
"abstract": "Multiple-view (MV) visualizations have become ubiquitous for visual communication and exploratory data visualization. However, most existing MV visualizations are designed for the desktop, which can be unsuitable for the continuously evolving displays of varying screen sizes. In this paper, we present a two-stage adaptation framework that supports the automated retargeting and semi-automated tailoring of a desktop MV visualization for rendering on devices with displays of varying sizes. First, we cast layout retargeting as an optimization problem and propose a simulated annealing technique that can automatically preserve the layout of multiple views. Second, we enable fine-tuning for the visual appearance of each view, using a rule-based auto configuration method complemented with an interactive interface for chart-oriented encoding adjustment. To demonstrate the feasibility and expressivity of our proposed approach, we present a gallery of MV visualizations that have been adapted from the desktop to small displays. We also report the result of a user study comparing visualizations generated using our approach with those by existing methods. The outcome indicates that the participants generally prefer visualizations generated using our approach and find them to be easier to use.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Multiple-view (MV) visualizations have become ubiquitous for visual communication and exploratory data visualization. However, most existing MV visualizations are designed for the desktop, which can be unsuitable for the continuously evolving displays of varying screen sizes. In this paper, we present a two-stage adaptation framework that supports the automated retargeting and semi-automated tailoring of a desktop MV visualization for rendering on devices with displays of varying sizes. First, we cast layout retargeting as an optimization problem and propose a simulated annealing technique that can automatically preserve the layout of multiple views. Second, we enable fine-tuning for the visual appearance of each view, using a rule-based auto configuration method complemented with an interactive interface for chart-oriented encoding adjustment. To demonstrate the feasibility and expressivity of our proposed approach, we present a gallery of MV visualizations that have been adapted from the desktop to small displays. We also report the result of a user study comparing visualizations generated using our approach with those by existing methods. The outcome indicates that the participants generally prefer visualizations generated using our approach and find them to be easier to use.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Multiple-view (MV) visualizations have become ubiquitous for visual communication and exploratory data visualization. However, most existing MV visualizations are designed for the desktop, which can be unsuitable for the continuously evolving displays of varying screen sizes. In this paper, we present a two-stage adaptation framework that supports the automated retargeting and semi-automated tailoring of a desktop MV visualization for rendering on devices with displays of varying sizes. First, we cast layout retargeting as an optimization problem and propose a simulated annealing technique that can automatically preserve the layout of multiple views. Second, we enable fine-tuning for the visual appearance of each view, using a rule-based auto configuration method complemented with an interactive interface for chart-oriented encoding adjustment. To demonstrate the feasibility and expressivity of our proposed approach, we present a gallery of MV visualizations that have been adapted from the desktop to small displays. We also report the result of a user study comparing visualizations generated using our approach with those by existing methods. The outcome indicates that the participants generally prefer visualizations generated using our approach and find them to be easier to use.",
"title": "Semi-Automatic Layout Adaptation for Responsive Multiple-View Visualization Design",
"normalizedTitle": "Semi-Automatic Layout Adaptation for Responsive Multiple-View Visualization Design",
"fno": "10029921",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Layout",
"Mobile Handsets",
"Bars",
"Stacking",
"Design Methodology",
"Layout Adaptation",
"Mobile Devices",
"Multiple View Visualization",
"Responsive Design"
],
"authors": [
{
"givenName": "Wei",
"surname": "Zeng",
"fullName": "Wei Zeng",
"affiliation": "Hong Kong University of Science and Technology (Guangzhou) and the Hong Kong University of Science and Technology, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xi",
"surname": "Chen",
"fullName": "Xi Chen",
"affiliation": "Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yihan",
"surname": "Hou",
"fullName": "Yihan Hou",
"affiliation": "Hong Kong University of Science and Technology (Guangzhou) and the Hong Kong University of Science and Technology, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lingdan",
"surname": "Shao",
"fullName": "Lingdan Shao",
"affiliation": "Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhe",
"surname": "Chu",
"fullName": "Zhe Chu",
"affiliation": "Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Remco",
"surname": "Chang",
"fullName": "Remco Chang",
"affiliation": "Tufts University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/12/ttg2013122396",
"title": "An Interaction Model for Visualizations Beyond The Desktop",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122396/13rRUxBa5nm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08443125",
"title": "Glanceable Visualization: Studies of Data Comparison Performance on Smartwatches",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08443125/17D45XDIXRv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icst/2022/6679/0/667900a140",
"title": "Automated Repair of Responsive Web Page Layouts",
"doi": null,
"abstractUrl": "/proceedings-article/icst/2022/667900a140/1E2wEvLyhA4",
"parentPublication": {
"id": "proceedings/icst/2022/6679/0",
"title": "2022 IEEE Conference on Software Testing, Verification and Validation (ICST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904451",
"title": "Multi-View Design Patterns and Responsive Visualization for Genomics Data",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904451/1H1gfVbEsiA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904883",
"title": "Effects of View Layout on Situated Analytics for Multiple-View Representations in Immersive Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904883/1H2lc7qemsg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089446",
"title": "Graphical Perception for Immersive Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089446/1jIxfA3tlUk",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a184",
"title": "Effects of Augmented Content’s Placement and Size on User’s Search Experience in Extended Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a184/1pBMk7KgZHy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552592",
"title": "An Automated Approach to Reasoning About Task-Oriented Insights in Responsive Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552592/1xic0SUdCNO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09555491",
"title": "Semantic Snapping for Guided Multi-View Visualization Design",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09555491/1xjQX1LHQJi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a324",
"title": "Towards In-situ Authoring of AR Visualizations with Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a324/1yeQJrGq6WI",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10026499",
"articleId": "1KkXscJg6vm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10034833",
"articleId": "1KpxdALb4By",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KnSrhDcDMk",
"name": "ttg555501-010029921s1-supp1-3240356.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010029921s1-supp1-3240356.mp4",
"extension": "mp4",
"size": "22.4 MB",
"__typename": "WebExtraType"
},
{
"id": "1KnSralSqXK",
"name": "ttg555501-010029921s1-supp2-3240356.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010029921s1-supp2-3240356.pdf",
"extension": "pdf",
"size": "3.69 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KkXrW23184",
"doi": "10.1109/TVCG.2023.3239951",
"abstract": "Target selection is one of essential operation made available by interaction techniques in virtual reality (VR) environments. However, effectively positioning or selecting occluded objects is under-investigated in VR, especially in the context of high-density or a high-dimensional data visualization with VR. In this paper, we propose <italic>ClockRay</italic>, an occluded-object selection technique that can maximize the intrinsic human wrist rotation skills through the integration of emerging ray selection techniques in VR environments. We describe the design space of the <italic>ClockRay</italic> technique and then evaluate its performance in a series of user studies. Drawing on the experimental results, we discuss the benefits of <italic>ClockRay</italic> compared to two popular ray selection techniques – <italic>RayCursor</italic> and <italic>RayCasting</italic>. Our findings can inform the design of VR-based interactive visualization systems for high-density data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Target selection is one of essential operation made available by interaction techniques in virtual reality (VR) environments. However, effectively positioning or selecting occluded objects is under-investigated in VR, especially in the context of high-density or a high-dimensional data visualization with VR. In this paper, we propose <italic>ClockRay</italic>, an occluded-object selection technique that can maximize the intrinsic human wrist rotation skills through the integration of emerging ray selection techniques in VR environments. We describe the design space of the <italic>ClockRay</italic> technique and then evaluate its performance in a series of user studies. Drawing on the experimental results, we discuss the benefits of <italic>ClockRay</italic> compared to two popular ray selection techniques – <italic>RayCursor</italic> and <italic>RayCasting</italic>. Our findings can inform the design of VR-based interactive visualization systems for high-density data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Target selection is one of essential operation made available by interaction techniques in virtual reality (VR) environments. However, effectively positioning or selecting occluded objects is under-investigated in VR, especially in the context of high-density or a high-dimensional data visualization with VR. In this paper, we propose ClockRay, an occluded-object selection technique that can maximize the intrinsic human wrist rotation skills through the integration of emerging ray selection techniques in VR environments. We describe the design space of the ClockRay technique and then evaluate its performance in a series of user studies. Drawing on the experimental results, we discuss the benefits of ClockRay compared to two popular ray selection techniques – RayCursor and RayCasting. Our findings can inform the design of VR-based interactive visualization systems for high-density data.",
"title": "ClockRay: A Wrist-Rotation based Technique for Occluded-Target Selection in Virtual Reality",
"normalizedTitle": "ClockRay: A Wrist-Rotation based Technique for Occluded-Target Selection in Virtual Reality",
"fno": "10026416",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Three Dimensional Displays",
"Wrist",
"Visualization",
"Input Devices",
"Virtual Reality",
"Usability",
"Virtual Reality",
"Object Selection",
"Ray Casting",
"Disambiguation",
"Wrist Rotation",
"3 D Data Visualization"
],
"authors": [
{
"givenName": "Huiyue",
"surname": "Wu",
"fullName": "Huiyue Wu",
"affiliation": "School of Journalism and Communication, Sun Yat-sen University, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaoxuan",
"surname": "Sun",
"fullName": "Xiaoxuan Sun",
"affiliation": "Sun Yat-sen University, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huawei",
"surname": "Tu",
"fullName": "Huawei Tu",
"affiliation": "La Trobe University, Melbourne, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaolong",
"surname": "Zhang",
"fullName": "Xiaolong Zhang",
"affiliation": "Pennsylvania State University, University Park, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2010/6846/0/05444706",
"title": "Evaluating depth perception of photorealistic mixed reality visualizations for occluded objects in outdoor environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2010/05444706/12OmNsd6vhN",
"parentPublication": {
"id": "proceedings/3dui/2010/6846/0",
"title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2007/0907/0/04142854",
"title": "Exploring the Effects of Environment Density and Target Visibility on Object Selection in 3D Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2007/04142854/12OmNwK7o7o",
"parentPublication": {
"id": "proceedings/3dui/2007/0907/0",
"title": "2007 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2012/4896/0/4896a350",
"title": "Segmentation Algorithm Study for Infrared Images with Occluded Target Based on Artificial Immune System",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2012/4896a350/12OmNx6PiE9",
"parentPublication": {
"id": "proceedings/cis/2012/4896/0",
"title": "2012 Eighth International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/03/tth2012030240",
"title": "Evaluation of Tactile Feedback Methods for Wrist Rotation Guidance",
"doi": null,
"abstractUrl": "/journal/th/2012/03/tth2012030240/13rRUyoPSPg",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2018/9194/0/08534024",
"title": "Tangible Braille Plot: Tangibly Exploring Geo-Temporal Data in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2018/08534024/17D45WgziPn",
"parentPublication": {
"id": "proceedings/bdva/2018/9194/0",
"title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714052",
"title": "PoVRPoint: Authoring Presentations in Mobile Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714052/1B0Y1Tyx2PC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a460",
"title": "Toward Intuitive Acquisition of Occluded VR Objects Through an Interactive Disocclusion Mini-map",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a460/1MNgkshFgXK",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089663",
"title": "Slicing-Volume: Hybrid 3D/2D Multi-target Selection Technique for Dense Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089663/1jIxdJFH8as",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090491",
"title": "Precision vs. Power Grip: A Comparison of Pen Grip Styles for Selection in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090491/1jIxqBC6XqU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09207831",
"title": "Fully-Occluded Target Selection in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09207831/1nuwDtnSHa8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10026242",
"articleId": "1KdURFLQTMk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10026499",
"articleId": "1KkXscJg6vm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Kl05qbinmM",
"name": "ttg555501-010026416s1-supp1-3239951.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010026416s1-supp1-3239951.mp4",
"extension": "mp4",
"size": "198 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KkXscJg6vm",
"doi": "10.1109/TVCG.2023.3240003",
"abstract": "Natural language interfaces (NLIs) enable users to flexibly specify analytical intentions in data visualization. However, diagnosing the visualization results without understanding the underlying generation process is challenging. Our research explores how to provide explanations for NLIs to help users locate the problems and further revise the queries. We present XNLI, an explainable NLI system for visual data analysis. The system introduces a Provenance Generator to reveal the detailed process of visual transformations, a suite of interactive widgets to support error adjustments, and a Hint Generator to provide query revision hints based on the analysis of user queries and interactions. Two usage scenarios of XNLI and a user study verify the effectiveness and usability of the system. Results suggest that XNLI can significantly enhance task accuracy without interrupting the NLI-based analysis process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Natural language interfaces (NLIs) enable users to flexibly specify analytical intentions in data visualization. However, diagnosing the visualization results without understanding the underlying generation process is challenging. Our research explores how to provide explanations for NLIs to help users locate the problems and further revise the queries. We present XNLI, an explainable NLI system for visual data analysis. The system introduces a Provenance Generator to reveal the detailed process of visual transformations, a suite of interactive widgets to support error adjustments, and a Hint Generator to provide query revision hints based on the analysis of user queries and interactions. Two usage scenarios of XNLI and a user study verify the effectiveness and usability of the system. Results suggest that XNLI can significantly enhance task accuracy without interrupting the NLI-based analysis process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Natural language interfaces (NLIs) enable users to flexibly specify analytical intentions in data visualization. However, diagnosing the visualization results without understanding the underlying generation process is challenging. Our research explores how to provide explanations for NLIs to help users locate the problems and further revise the queries. We present XNLI, an explainable NLI system for visual data analysis. The system introduces a Provenance Generator to reveal the detailed process of visual transformations, a suite of interactive widgets to support error adjustments, and a Hint Generator to provide query revision hints based on the analysis of user queries and interactions. Two usage scenarios of XNLI and a user study verify the effectiveness and usability of the system. Results suggest that XNLI can significantly enhance task accuracy without interrupting the NLI-based analysis process.",
"title": "XNLI: Explaining and Diagnosing NLI-based Visual Data Analysis",
"normalizedTitle": "XNLI: Explaining and Diagnosing NLI-based Visual Data Analysis",
"fno": "10026499",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Task Analysis",
"Encoding",
"Data Analysis",
"Motion Pictures",
"Prototypes",
"Natural Language Interface",
"Visual Data Analysis",
"Explainability"
],
"authors": [
{
"givenName": "Yingchaojie",
"surname": "Feng",
"fullName": "Yingchaojie Feng",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xingbo",
"surname": "Wang",
"fullName": "Xingbo Wang",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bo",
"surname": "Pan",
"fullName": "Bo Pan",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kam Kwai",
"surname": "Wong",
"fullName": "Kam Kwai Wong",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yi",
"surname": "Ren",
"fullName": "Yi Ren",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shi",
"surname": "Liu",
"fullName": "Shi Liu",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zihan",
"surname": "Yan",
"fullName": "Zihan Yan",
"affiliation": "MIT Media Lab, Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuxin",
"surname": "Ma",
"fullName": "Yuxin Ma",
"affiliation": "Department of Computer Science and Engineering, Southern University of Science and Technology, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Chen",
"fullName": "Wei Chen",
"affiliation": "Laboratory of Art and Archaeology Image (Zhejiang University), Ministry of Education, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2014/4302/0/4302a340",
"title": "UnTangle: Visual Mining for Data with Uncertain Multi-labels via Triangle Map",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2014/4302a340/12OmNzA6GSx",
"parentPublication": {
"id": "proceedings/icdm/2014/4302/0",
"title": "2014 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2013/3142/0/3143b093",
"title": "Interactive Data Analysis Tool by Augmenting MATLAB with Semantic Objects",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2013/3143b093/12OmNzkuKBB",
"parentPublication": {
"id": "proceedings/icdmw/2013/3142/0",
"title": "2013 IEEE 13th International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/02/07091015",
"title": "UnTangle Map: Visual Analysis of Probabilistic Multi-Label Data",
"doi": null,
"abstractUrl": "/journal/tg/2016/02/07091015/13rRUxASuAx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07534787",
"title": "Visualizing Dimension Coverage to Support Exploratory Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07534787/13rRUxcsYLV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09699035",
"title": "Towards Natural Language Interfaces for Data Visualization: A Survey",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09699035/1ADJfMYBSCs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09912366",
"title": "Towards Natural Language-Based Visualization Authoring",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09912366/1HeiWkRN3tC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933673",
"title": "VisWall: Visual Data Exploration Using Direct Combination on Large Touch Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933673/1fTgGNuw1bi",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/04/09118800",
"title": "How to Ask What to Say?: Strategies for Evaluating Natural Language Interfaces for Data Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2020/04/09118800/1kHUNLgZhSM",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2020/2903/0/09101356",
"title": "The Pastwatch: On the usability of provenance data in relational databases",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2020/09101356/1kaMB5wtYys",
"parentPublication": {
"id": "proceedings/icde/2020/2903/0",
"title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222342",
"title": "NL4DV: A Toolkit for Generating Analytic Specifications for Data Visualization from Natural Language Queries",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222342/1nTqOo5NR3G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10026416",
"articleId": "1KkXrW23184",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10029921",
"articleId": "1KmyX4gJuMg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KdUR4fXJXG",
"doi": "10.1109/TVCG.2023.3239670",
"abstract": "Holographic displays are ideal display technologies for virtual and augmented reality because all visual cues are provided. However, real-time high-quality holographic displays are difficult to achieve because the generation of high-quality computer-generated hologram (CGH) is inefficient in existing algorithms. Here, complex-valued convolutional neural network (CCNN) is proposed for phase-only CGH generation. The CCNN-CGH architecture is effective with a simple network structure based on the character design of complex amplitude. A holographic display prototype is set up for optical reconstruction. Experiments verify that state-of-the-art performance is achieved in terms of quality and generation speed in existing end-to-end neural holography methods using the ideal wave propagation model. The generation speed is three times faster than HoloNet and one-sixth faster than Holo-encoder, and the Peak Signal to Noise Ratio (PSNR) is increased by 3 dB and 9 dB, respectively. Real-time high-quality CGHs are generated in 1920×1072 and 3840×2160 resolutions for dynamic holographic displays.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Holographic displays are ideal display technologies for virtual and augmented reality because all visual cues are provided. However, real-time high-quality holographic displays are difficult to achieve because the generation of high-quality computer-generated hologram (CGH) is inefficient in existing algorithms. Here, complex-valued convolutional neural network (CCNN) is proposed for phase-only CGH generation. The CCNN-CGH architecture is effective with a simple network structure based on the character design of complex amplitude. A holographic display prototype is set up for optical reconstruction. Experiments verify that state-of-the-art performance is achieved in terms of quality and generation speed in existing end-to-end neural holography methods using the ideal wave propagation model. The generation speed is three times faster than HoloNet and one-sixth faster than Holo-encoder, and the Peak Signal to Noise Ratio (PSNR) is increased by 3 dB and 9 dB, respectively. Real-time high-quality CGHs are generated in 1920×1072 and 3840×2160 resolutions for dynamic holographic displays.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Holographic displays are ideal display technologies for virtual and augmented reality because all visual cues are provided. However, real-time high-quality holographic displays are difficult to achieve because the generation of high-quality computer-generated hologram (CGH) is inefficient in existing algorithms. Here, complex-valued convolutional neural network (CCNN) is proposed for phase-only CGH generation. The CCNN-CGH architecture is effective with a simple network structure based on the character design of complex amplitude. A holographic display prototype is set up for optical reconstruction. Experiments verify that state-of-the-art performance is achieved in terms of quality and generation speed in existing end-to-end neural holography methods using the ideal wave propagation model. The generation speed is three times faster than HoloNet and one-sixth faster than Holo-encoder, and the Peak Signal to Noise Ratio (PSNR) is increased by 3 dB and 9 dB, respectively. Real-time high-quality CGHs are generated in 1920×1072 and 3840×2160 resolutions for dynamic holographic displays.",
"title": "Real-time High-Quality Computer-Generated Hologram Using Complex-Valued Convolutional Neural Network",
"normalizedTitle": "Real-time High-Quality Computer-Generated Hologram Using Complex-Valued Convolutional Neural Network",
"fno": "10025829",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Neural Networks",
"Computer Architecture",
"Three Dimensional Displays",
"Deep Learning",
"Image Reconstruction",
"Holography",
"Convolution",
"Holography",
"Neural Models",
"Virtual And Augmented Reality"
],
"authors": [
{
"givenName": "Chongli",
"surname": "Zhong",
"fullName": "Chongli Zhong",
"affiliation": "State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xinzhu",
"surname": "Sang",
"fullName": "Xinzhu Sang",
"affiliation": "State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Binbin",
"surname": "Yan",
"fullName": "Binbin Yan",
"affiliation": "State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hui",
"surname": "Li",
"fullName": "Hui Li",
"affiliation": "Beijing National Research Center for Information Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Duo",
"surname": "Chen",
"fullName": "Duo Chen",
"affiliation": "State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiujuan",
"surname": "Qin",
"fullName": "Xiujuan Qin",
"affiliation": "State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shuo",
"surname": "Chen",
"fullName": "Shuo Chen",
"affiliation": "State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaoqian",
"surname": "Ye",
"fullName": "Xiaoqian Ye",
"affiliation": "State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-11",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2016/1552/0/07574699",
"title": "Computer generated hologram from Multiview-plus-Depth data considering specular reflections",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2016/07574699/12OmNwDSdFk",
"parentPublication": {
"id": "proceedings/icmew/2016/1552/0",
"title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2017/2937/0/2937a318",
"title": "Drawing Abrasive Hologram Animations with Auto-Generated Scratch Patterns",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a318/12OmNwlZu0Q",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candar/2017/2087/0/2087a589",
"title": "Acceleration of Large-Scale CGH Generation Using Multi-GPU Cluster",
"doi": null,
"abstractUrl": "/proceedings-article/candar/2017/2087a589/12OmNwnYFV3",
"parentPublication": {
"id": "proceedings/candar/2017/2087/0",
"title": "2017 Fifth International Symposium on Computing and Networking (CANDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a120",
"title": "The Virtual Factory: Hologram-Enabled Control and Monitoring of Industrial IoT Devices",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a120/17D45W9KVIs",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a746",
"title": "Metameric Varifocal Holograms",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a746/1CJcc750PQI",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a418",
"title": "Realistic Defocus Blur for Multiplane Computer-Generated Holography",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a418/1MNgFZaCqiI",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a581",
"title": "HoloBeam: Paper-Thin Near-Eye Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a581/1MNgR9rZSCc",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09199563",
"title": "Correcting the Proximity Effect in Nanophotonic Phased Arrays",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09199563/1ncgvG9aJ6o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a353",
"title": "Proximity Effect Correction for Fresnel Holograms on Nanophotonic Phased Arrays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a353/1tuB1K9iOKk",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523842",
"title": "Gaze-Contingent Retinal Speckle Suppression for Perceptually-Matched Foveated Holographic Displays",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523842/1wpqr1B6wA8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10025400",
"articleId": "1KcgX3ZT4XK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10026242",
"articleId": "1KdURFLQTMk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KkXghmsfok",
"name": "ttg555501-010025829s1-supp1-3239670.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010025829s1-supp1-3239670.mp4",
"extension": "mp4",
"size": "8.15 MB",
"__typename": "WebExtraType"
},
{
"id": "1KkXg8kHgT6",
"name": "ttg555501-010025829s1-supp2-3239670.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010025829s1-supp2-3239670.mp4",
"extension": "mp4",
"size": "7.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KdURFLQTMk",
"doi": "10.1109/TVCG.2023.3239909",
"abstract": "With the increasing pervasiveness of Artificial Intelligence (AI), many visual analytics tools have been proposed to examine fairness, but they mostly focus on data scientist users. Instead, tackling fairness must be inclusive and involve domain experts with specialized tools and workflows. Thus, domain-specific visualizations are needed for algorithmic fairness. Furthermore, while much work on AI fairness has focused on predictive decisions, less has been done for fair allocation and planning, which require human expertise and iterative design to integrate myriad constraints. We propose the Intelligible Fair Allocation (IF-Alloc) Framework that leverages explanations of causal attribution (Why), contrastive (Why Not) and counterfactual reasoning (What If, How To) to aid domain experts to assess and alleviate unfairness in allocation problems. We apply the framework to fair urban planning for designing cities that provide equal access to amenities and benefits for diverse resident types. Specifically, we propose an interactive visual tool, Intelligible Fair City Planner (IF-City), to help urban planners to perceive inequality across groups, identify and attribute sources of inequality, and mitigate inequality with automatic allocation simulations and constraint-satisfying recommendations (IF-Plan). We demonstrate and evaluate the usage and usefulness of IF-City on a real neighborhood in New York City, US, with practicing urban planners from multiple countries, and discuss generalizing our findings, application, and framework to other use cases and applications of fair allocation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the increasing pervasiveness of Artificial Intelligence (AI), many visual analytics tools have been proposed to examine fairness, but they mostly focus on data scientist users. Instead, tackling fairness must be inclusive and involve domain experts with specialized tools and workflows. Thus, domain-specific visualizations are needed for algorithmic fairness. Furthermore, while much work on AI fairness has focused on predictive decisions, less has been done for fair allocation and planning, which require human expertise and iterative design to integrate myriad constraints. We propose the Intelligible Fair Allocation (IF-Alloc) Framework that leverages explanations of causal attribution (Why), contrastive (Why Not) and counterfactual reasoning (What If, How To) to aid domain experts to assess and alleviate unfairness in allocation problems. We apply the framework to fair urban planning for designing cities that provide equal access to amenities and benefits for diverse resident types. Specifically, we propose an interactive visual tool, Intelligible Fair City Planner (IF-City), to help urban planners to perceive inequality across groups, identify and attribute sources of inequality, and mitigate inequality with automatic allocation simulations and constraint-satisfying recommendations (IF-Plan). We demonstrate and evaluate the usage and usefulness of IF-City on a real neighborhood in New York City, US, with practicing urban planners from multiple countries, and discuss generalizing our findings, application, and framework to other use cases and applications of fair allocation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the increasing pervasiveness of Artificial Intelligence (AI), many visual analytics tools have been proposed to examine fairness, but they mostly focus on data scientist users. Instead, tackling fairness must be inclusive and involve domain experts with specialized tools and workflows. Thus, domain-specific visualizations are needed for algorithmic fairness. Furthermore, while much work on AI fairness has focused on predictive decisions, less has been done for fair allocation and planning, which require human expertise and iterative design to integrate myriad constraints. We propose the Intelligible Fair Allocation (IF-Alloc) Framework that leverages explanations of causal attribution (Why), contrastive (Why Not) and counterfactual reasoning (What If, How To) to aid domain experts to assess and alleviate unfairness in allocation problems. We apply the framework to fair urban planning for designing cities that provide equal access to amenities and benefits for diverse resident types. Specifically, we propose an interactive visual tool, Intelligible Fair City Planner (IF-City), to help urban planners to perceive inequality across groups, identify and attribute sources of inequality, and mitigate inequality with automatic allocation simulations and constraint-satisfying recommendations (IF-Plan). We demonstrate and evaluate the usage and usefulness of IF-City on a real neighborhood in New York City, US, with practicing urban planners from multiple countries, and discuss generalizing our findings, application, and framework to other use cases and applications of fair allocation.",
"title": "IF-City: Intelligible Fair City Planning to Measure, Explain and Mitigate Inequality",
"normalizedTitle": "IF-City: Intelligible Fair City Planning to Measure, Explain and Mitigate Inequality",
"fno": "10026242",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Resource Management",
"Visualization",
"Urban Planning",
"Indexes",
"Data Visualization",
"Buildings",
"Sociology",
"Explainable Artificial Intelligence",
"Fairness",
"Intelligibility",
"Resource Allocation",
"Urban Planning"
],
"authors": [
{
"givenName": "Yan",
"surname": "Lyu",
"fullName": "Yan Lyu",
"affiliation": "School of Computer Science and Engineering, Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hangxin",
"surname": "Lu",
"fullName": "Hangxin Lu",
"affiliation": "Future Cities Laboratory, Singapore-ETH Centre, Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Min Kyung",
"surname": "Lee",
"fullName": "Min Kyung Lee",
"affiliation": "School of Information, The University of Texas at Austin, US",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gerhard",
"surname": "Schmitt",
"fullName": "Gerhard Schmitt",
"affiliation": "Future Cities Laboratory, Singapore-ETH Centre, Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Brian Y.",
"surname": "Lim",
"fullName": "Brian Y. Lim",
"affiliation": "School of Computing, National University of Singapore, Singapore, Singapore",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/compsacw/2014/3578/0/3578a043",
"title": "Fair Optimization -- Methodological Foundations of Fairness in Network Resource Allocation",
"doi": null,
"abstractUrl": "/proceedings-article/compsacw/2014/3578a043/12OmNAlvHPi",
"parentPublication": {
"id": "proceedings/compsacw/2014/3578/0",
"title": "2014 IEEE 38th International Computer Software and Applications Conference Workshops (COMPSACW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2015/9393/0/9393a519",
"title": "Neighborhood Communication Technology Based on the Wisdom City Management",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2015/9393a519/12OmNC3o51p",
"parentPublication": {
"id": "proceedings/isdea/2015/9393/0",
"title": "2015 Sixth International Conference on Intelligent Systems Design and Engineering Applications (ISDEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/saner/2016/1855/2/1855b017",
"title": "Using the City Metaphor for Visualizing Test-Related Metrics",
"doi": null,
"abstractUrl": "/proceedings-article/saner/2016/1855b017/12OmNCfjeoN",
"parentPublication": {
"id": "saner/2016/1855/2",
"title": "2016 IEEE 23rd International Conference on Software Analysis, Evolution, and Reengineering (SANER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceert/2021/3817/0/381700a240",
"title": "Urban rail transit line and station planning optimization based on GIS : -- Take Haikou city as an example",
"doi": null,
"abstractUrl": "/proceedings-article/iceert/2021/381700a240/1A3jhSPoQnK",
"parentPublication": {
"id": "proceedings/iceert/2021/3817/0",
"title": "2021 International Conference on Information Control, Electrical Engineering and Rail Transit (ICEERT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/annsim/2022/5288/0/09859328",
"title": "How The Urban Microclimate And Outdoor Thermal Comfort Can Affect Intra-City Mobility Patterns: Evidence From New York City",
"doi": null,
"abstractUrl": "/proceedings-article/annsim/2022/09859328/1G4EPdr2los",
"parentPublication": {
"id": "proceedings/annsim/2022/5288/0",
"title": "2022 Annual Modeling and Simulation Conference (ANNSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2022/6390/0/09904359",
"title": "Increasing the public space ratio (PSR) using G.I.S.",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2022/09904359/1H5KmhIfFny",
"parentPublication": {
"id": "proceedings/iisa/2022/6390/0",
"title": "2022 13th International Conference on Information, Intelligence, Systems & Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a859",
"title": "Data-Driven City Traffic Planning Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a859/1J7W70fyKDS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798250",
"title": "Early Stage Digital-Physical Twinning to Engage Citizens with City Planning and Design",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798250/1cJ0OBfYFyM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vissoft/2020/9914/0/991400a110",
"title": "Memory Cities: Visualizing Heap Memory Evolution Using the Software City Metaphor",
"doi": null,
"abstractUrl": "/proceedings-article/vissoft/2020/991400a110/1olHCFxDCMg",
"parentPublication": {
"id": "proceedings/vissoft/2020/9914/0",
"title": "2020 Working Conference on Software Visualization (VISSOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2021/0232/0/023200a582",
"title": "The Urban City-Industry Integration Degree Evaluation Based on Ordinal Logistic Regression",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2021/023200a582/1yLPvBHAGty",
"parentPublication": {
"id": "proceedings/icnisc/2021/0232/0",
"title": "2021 7th Annual International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10025829",
"articleId": "1KdUR4fXJXG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10026416",
"articleId": "1KkXrW23184",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KkXrGJ7Zio",
"name": "ttg555501-010026242s1-supp1-3239909.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010026242s1-supp1-3239909.pdf",
"extension": "pdf",
"size": "3.75 MB",
"__typename": "WebExtraType"
},
{
"id": "1KkXpgAKaOI",
"name": "ttg555501-010026242s1-supp3-3239909.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010026242s1-supp3-3239909.mp4",
"extension": "mp4",
"size": "222 MB",
"__typename": "WebExtraType"
},
{
"id": "1KkXlebZ3K8",
"name": "ttg555501-010026242s1-supp2-3239909.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010026242s1-supp2-3239909.mp4",
"extension": "mp4",
"size": "242 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KcgWzIu8F2",
"doi": "10.1109/TVCG.2023.3239364",
"abstract": "Shading plays an important role in cartoon drawings to present the 3D lighting and depth information in a 2D image to improve the visual information and pleasantness. But it also introduces apparent challenges in analyzing and processing the cartoon drawings for different computer graphics and vision applications, such as segmentation, depth estimation, and relighting. Extensive research has been made in removing or separating the shading information to facilitate these applications. Unfortunately, the existing researches only focused on natural images, which are natively different from cartoons since the shading in natural images is physically correct and can be modeled based on physical priors. However, shading in cartoons is manually created by artists, which may be imprecise, abstract, and stylized. This makes it extremely difficult to model the shading in cartoon drawings. Without modeling the shading prior, in the paper, we propose a learning-based solution to separate the shading from the original colors using a two-branch system consisting of two subnetworks. To the best of our knowledge, our method is the first attempt in separating shading information from cartoon drawings. Our method significantly outperforms the methods tailored for natural images. Extensive evaluations have been performed with convincing results in all cases.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Shading plays an important role in cartoon drawings to present the 3D lighting and depth information in a 2D image to improve the visual information and pleasantness. But it also introduces apparent challenges in analyzing and processing the cartoon drawings for different computer graphics and vision applications, such as segmentation, depth estimation, and relighting. Extensive research has been made in removing or separating the shading information to facilitate these applications. Unfortunately, the existing researches only focused on natural images, which are natively different from cartoons since the shading in natural images is physically correct and can be modeled based on physical priors. However, shading in cartoons is manually created by artists, which may be imprecise, abstract, and stylized. This makes it extremely difficult to model the shading in cartoon drawings. Without modeling the shading prior, in the paper, we propose a learning-based solution to separate the shading from the original colors using a two-branch system consisting of two subnetworks. To the best of our knowledge, our method is the first attempt in separating shading information from cartoon drawings. Our method significantly outperforms the methods tailored for natural images. Extensive evaluations have been performed with convincing results in all cases.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Shading plays an important role in cartoon drawings to present the 3D lighting and depth information in a 2D image to improve the visual information and pleasantness. But it also introduces apparent challenges in analyzing and processing the cartoon drawings for different computer graphics and vision applications, such as segmentation, depth estimation, and relighting. Extensive research has been made in removing or separating the shading information to facilitate these applications. Unfortunately, the existing researches only focused on natural images, which are natively different from cartoons since the shading in natural images is physically correct and can be modeled based on physical priors. However, shading in cartoons is manually created by artists, which may be imprecise, abstract, and stylized. This makes it extremely difficult to model the shading in cartoon drawings. Without modeling the shading prior, in the paper, we propose a learning-based solution to separate the shading from the original colors using a two-branch system consisting of two subnetworks. To the best of our knowledge, our method is the first attempt in separating shading information from cartoon drawings. Our method significantly outperforms the methods tailored for natural images. Extensive evaluations have been performed with convincing results in all cases.",
"title": "Separating Shading and Reflectance from Cartoon Illustrations",
"normalizedTitle": "Separating Shading and Reflectance from Cartoon Illustrations",
"fno": "10025396",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Reflectivity",
"Image Color Analysis",
"Lighting",
"Estimation",
"Visualization",
"Task Analysis",
"Smoothing Methods",
"Cartoon",
"Layer Decomposition",
"Shading Extraction",
"Shading Removal",
"Deep Learning"
],
"authors": [
{
"givenName": "Ziheng",
"surname": "Ma",
"fullName": "Ziheng Ma",
"affiliation": "College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chengze",
"surname": "Li",
"fullName": "Chengze Li",
"affiliation": "School of Computing and Information Sciences, Caritas Institute of Higher Education, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xueting",
"surname": "Liu",
"fullName": "Xueting Liu",
"affiliation": "College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huisi",
"surname": "Wu",
"fullName": "Huisi Wu",
"affiliation": "College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhenkun",
"surname": "Wen",
"fullName": "Zhenkun Wen",
"affiliation": "College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, Guangdong, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icicta/2015/7644/0/7644a265",
"title": "Cartoon Material Annotation and Retrieval System for Web-Interactive-Service Cartoon Making",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2015/7644a265/12OmNCdk2wA",
"parentPublication": {
"id": "proceedings/icicta/2015/7644/0",
"title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cccrv/2004/2127/0/01301427",
"title": "Recovering the shading image under known illumination",
"doi": null,
"abstractUrl": "/proceedings-article/cccrv/2004/01301427/12OmNvDZF0N",
"parentPublication": {
"id": "proceedings/cccrv/2004/2127/0",
"title": "First Canadian Conference on Computer and Robot Vision, 2004. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a913",
"title": "Cartoon Rendering Illumination Model Based on Phong",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a913/12OmNwoPtun",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pbmcv/1995/7021/0/00514684",
"title": "Reflectance analysis under solar illumination",
"doi": null,
"abstractUrl": "/proceedings-article/pbmcv/1995/00514684/12OmNxbW4O4",
"parentPublication": {
"id": "proceedings/pbmcv/1995/7021/0",
"title": "Proceedings of the Workshop on Physics-Based Modeling in Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892247",
"title": "MagicToon: A 2D-to-3D creative cartoon modeling system with mobile AR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892247/12OmNxjjEhC",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a850",
"title": "Shading Annotations in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a850/12OmNxuo0id",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2014/7981/0/7981b707",
"title": "A Cartoon Image Annotation and Retrieval System Supporting Fast Cartoon Making",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981b707/12OmNz2kqec",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/08/06975182",
"title": "Shape, Illumination, and Reflectance from Shading",
"doi": null,
"abstractUrl": "/journal/tp/2015/08/06975182/13rRUwghd6c",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800b147",
"title": "Precomputed Radiance Transfer for Reflectance and Lighting Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800b147/1qyxlpSwLhC",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b075",
"title": "Recovering Real-World Reflectance Properties and Shading From HDR Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b075/1zWEfggzOaA",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10024310",
"articleId": "1KaBaMU2Iog",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10025400",
"articleId": "1KcgX3ZT4XK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KdUQQ9814Q",
"name": "ttg555501-010025396s1-supp1-3239364.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010025396s1-supp1-3239364.pdf",
"extension": "pdf",
"size": "4.24 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KcgX3ZT4XK",
"doi": "10.1109/TVCG.2023.3239370",
"abstract": "Embedding unified skeletons into unregistered scans is fundamental to finding correspondences, depicting motions, and capturing underlying structures among the articulated objects in the same category. Some existing approaches rely on laborious registration to adapt a predefined LBS model to each input, while others require the input to be set to a canonical pose, <italic>e.g.</italic> T-pose or A-pose. However, their effectiveness is always influenced by the water-tightness, face topology, and vertex density of the input mesh. At the core of our approach lies a novel unwrapping method, named SUPPLE  (Spherical UnwraPping ProfiLEs), which maps a surface into image planes independent of mesh topologies. Based on this lower-dimensional representation, a learning-based framework is further designed to localize and connect skeletal joints with fully convolutional architectures. Experiments demonstrate that our framework yields reliable skeleton extractions across a broad range of articulated categories, from raw scans to online CADs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Embedding unified skeletons into unregistered scans is fundamental to finding correspondences, depicting motions, and capturing underlying structures among the articulated objects in the same category. Some existing approaches rely on laborious registration to adapt a predefined LBS model to each input, while others require the input to be set to a canonical pose, <italic>e.g.</italic> T-pose or A-pose. However, their effectiveness is always influenced by the water-tightness, face topology, and vertex density of the input mesh. At the core of our approach lies a novel unwrapping method, named SUPPLE  (Spherical UnwraPping ProfiLEs), which maps a surface into image planes independent of mesh topologies. Based on this lower-dimensional representation, a learning-based framework is further designed to localize and connect skeletal joints with fully convolutional architectures. Experiments demonstrate that our framework yields reliable skeleton extractions across a broad range of articulated categories, from raw scans to online CADs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Embedding unified skeletons into unregistered scans is fundamental to finding correspondences, depicting motions, and capturing underlying structures among the articulated objects in the same category. Some existing approaches rely on laborious registration to adapt a predefined LBS model to each input, while others require the input to be set to a canonical pose, e.g. T-pose or A-pose. However, their effectiveness is always influenced by the water-tightness, face topology, and vertex density of the input mesh. At the core of our approach lies a novel unwrapping method, named SUPPLE (Spherical UnwraPping ProfiLEs), which maps a surface into image planes independent of mesh topologies. Based on this lower-dimensional representation, a learning-based framework is further designed to localize and connect skeletal joints with fully convolutional architectures. Experiments demonstrate that our framework yields reliable skeleton extractions across a broad range of articulated categories, from raw scans to online CADs.",
"title": "Skeleton Extraction for Articulated Objects with the Spherical Unwrapping Profiles",
"normalizedTitle": "Skeleton Extraction for Articulated Objects with the Spherical Unwrapping Profiles",
"fno": "10025400",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Skeleton",
"Three Dimensional Displays",
"Feature Extraction",
"Heating Systems",
"Topology",
"Point Cloud Compression",
"Task Analysis",
"Skeleton Embedding",
"Spherical Unwrapping",
"Surface To Image Representation"
],
"authors": [
{
"givenName": "Zimeng",
"surname": "Zhao",
"fullName": "Zimeng Zhao",
"affiliation": "School of Automation, Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Xie",
"fullName": "Wei Xie",
"affiliation": "School of Automation, Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Binghui",
"surname": "Zuo",
"fullName": "Binghui Zuo",
"affiliation": "School of Automation, Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yangang",
"surname": "Wang",
"fullName": "Yangang Wang",
"affiliation": "School of Automation, Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2015/6683/0/6683a094",
"title": "Non-rigid Articulated Point Set Registration for Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2015/6683a094/12OmNC2OSPb",
"parentPublication": {
"id": "proceedings/wacv/2015/6683/0",
"title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a038",
"title": "Dynamic High Resolution Deformable Articulated Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a038/12OmNqFa5nJ",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759491",
"title": "FAAST: The Flexible Action and Articulated Skeleton Toolkit",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759491/12OmNxymo4O",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/03/ttg2011030368",
"title": "Scan-Based Volume Animation Driven by Locally Adaptive Articulated Registrations",
"doi": null,
"abstractUrl": "/journal/tg/2011/03/ttg2011030368/13rRUwInvB0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/09/08025409",
"title": "Highly Articulated Kinematic Structure Estimation Combining Motion and Skeleton Information",
"doi": null,
"abstractUrl": "/journal/tp/2018/09/08025409/13rRUwjGoHh",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3189",
"title": "CAPTRA: CAtegory-level Pose Tracking for Rigid and Articulated Objects from Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3189/1BmHryRsl56",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccgiv/2022/9250/0/925000a050",
"title": "Unwrapping phase method based on rearranged periodic fragments",
"doi": null,
"abstractUrl": "/proceedings-article/iccgiv/2022/925000a050/1Lxfqvn3h16",
"parentPublication": {
"id": "proceedings/iccgiv/2022/9250/0",
"title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300l1959",
"title": "LBS Autoencoder: Self-Supervised Fitting of Articulated Meshes to Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300l1959/1gyrapLInSw",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a011",
"title": "A Skeleton-Driven Neural Occupancy Representation for Articulated Hands",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a011/1zWE7AwRO2A",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a899",
"title": "SUPPLE: Extracting Hand Skeleton with Spherical Unwrapping Profiles",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a899/1zWEklpAaJy",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10025396",
"articleId": "1KcgWzIu8F2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10025829",
"articleId": "1KdUR4fXJXG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Kl06R5ItnG",
"name": "ttg555501-010025400s1-supp1-3239370.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010025400s1-supp1-3239370.pdf",
"extension": "pdf",
"size": "18.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1K9stqcyyQM",
"doi": "10.1109/TVCG.2023.3238428",
"abstract": "Virtual product presentations that rely on static images and text are often insufficient to communicate all the information that is necessary to accurately evaluate a product. Technologies such as Virtual Reality (VR) or Augmented Reality (AR) have enabled more sophisticated representation methods, but certain product characteristics are difficult to assess and may result in perceptual differences when a product is evaluated in different visual media. In this paper, we report two case studies in which a group of participants evaluated three designs of two product typologies (i.e., a desktop telephone and a coffee maker) as presented in three different visual media (i.e., photorealistic renderings, AR, and VR for the first case study; and photographs, a non-immersive virtual environment, and AR for the second case study) using eight semantic scales. An inferential statistical method using Aligned Rank Transform (ART) proceedings was applied to determine perceptual differences between groups. Our results show that in both cases product attributes in Jordan's physio-pleasure category are the most affected by the presentation media. The socio-pleasure category was also affected for the case of the coffee makers. The level of immersion afforded by the medium significantly affects product evaluation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual product presentations that rely on static images and text are often insufficient to communicate all the information that is necessary to accurately evaluate a product. Technologies such as Virtual Reality (VR) or Augmented Reality (AR) have enabled more sophisticated representation methods, but certain product characteristics are difficult to assess and may result in perceptual differences when a product is evaluated in different visual media. In this paper, we report two case studies in which a group of participants evaluated three designs of two product typologies (i.e., a desktop telephone and a coffee maker) as presented in three different visual media (i.e., photorealistic renderings, AR, and VR for the first case study; and photographs, a non-immersive virtual environment, and AR for the second case study) using eight semantic scales. An inferential statistical method using Aligned Rank Transform (ART) proceedings was applied to determine perceptual differences between groups. Our results show that in both cases product attributes in Jordan's physio-pleasure category are the most affected by the presentation media. The socio-pleasure category was also affected for the case of the coffee makers. The level of immersion afforded by the medium significantly affects product evaluation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual product presentations that rely on static images and text are often insufficient to communicate all the information that is necessary to accurately evaluate a product. Technologies such as Virtual Reality (VR) or Augmented Reality (AR) have enabled more sophisticated representation methods, but certain product characteristics are difficult to assess and may result in perceptual differences when a product is evaluated in different visual media. In this paper, we report two case studies in which a group of participants evaluated three designs of two product typologies (i.e., a desktop telephone and a coffee maker) as presented in three different visual media (i.e., photorealistic renderings, AR, and VR for the first case study; and photographs, a non-immersive virtual environment, and AR for the second case study) using eight semantic scales. An inferential statistical method using Aligned Rank Transform (ART) proceedings was applied to determine perceptual differences between groups. Our results show that in both cases product attributes in Jordan's physio-pleasure category are the most affected by the presentation media. The socio-pleasure category was also affected for the case of the coffee makers. The level of immersion afforded by the medium significantly affects product evaluation.",
"title": "An Examination of the Relationship between Visualization Media and Consumer Product Evaluation",
"normalizedTitle": "An Examination of the Relationship between Visualization Media and Consumer Product Evaluation",
"fno": "10023985",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Media",
"Visualization",
"Telephone Sets",
"Prototypes",
"Electronic Mail",
"Solid Modeling",
"Rendering Computer Graphics",
"Artificial",
"Augmented",
"And Virtual Realities",
"Virtual Reality",
"Consumer Products",
"Perception And Psychophysics"
],
"authors": [
{
"givenName": "Almudena",
"surname": "Palacios-Ibáñez",
"fullName": "Almudena Palacios-Ibáñez",
"affiliation": "HUMAN-Tech, Universitat Politècnica de València, Valencia, Spain",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Simon",
"surname": "Pirault",
"fullName": "Simon Pirault",
"affiliation": "Universitat Politècnica de València, Valencia, Spain",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Francesc",
"surname": "Ochando-Martí",
"fullName": "Francesc Ochando-Martí",
"affiliation": "Universitat Politècnica de València, Valencia, Spain",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Manuel",
"surname": "Contero",
"fullName": "Manuel Contero",
"affiliation": "HUMAN-Tech, Universitat Politècnica de València, Valencia, Spain",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jorge D.",
"surname": "Camba",
"fullName": "Jorge D. Camba",
"affiliation": "School of Engineering Technology, Purdue University, West Lafayette, IN, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2016/5670/0/5670b830",
"title": "The Use of Social Media Tools in the Product Life Cycle Phases: A Systematic Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670b830/12OmNBlXsbf",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2008/3075/0/04438841",
"title": "Relationship between Uncertainty and Patterns of Pre-purchase Consumer Search in Electronic Markets",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2008/04438841/12OmNwFRpbk",
"parentPublication": {
"id": "proceedings/hicss/2008/3075/0",
"title": "Proceedings of the 41st Annual Hawaii International Conference on System Sciences (HICSS 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284833",
"title": "Semantic Analysis and Personalization for Mobile Media Applications",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284833/12OmNyRg4E7",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/weit/2013/3057/0/3057a045",
"title": "Fuzzy a-Ideals of Product Operator on Bounded Fuzzy Lattices",
"doi": null,
"abstractUrl": "/proceedings-article/weit/2013/3057a045/12OmNyY4rpa",
"parentPublication": {
"id": "proceedings/weit/2013/3057/0",
"title": "2013 2nd Workshop-School on Theoretical Computer Science (WEIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2015/6656/0/6656a039",
"title": "Social Media Data Aggregation and Mining for Internet-Scale Customer Relationship Management",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2015/6656a039/12OmNz6iODu",
"parentPublication": {
"id": "proceedings/iri/2015/6656/0",
"title": "2015 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2014/2504/0/06759110",
"title": "Product Versus Non-product Oriented Social Media Platforms: Online Consumer Opinion Composition and Evolution",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2014/06759110/12OmNz6iOsB",
"parentPublication": {
"id": "proceedings/hicss/2014/2504/0",
"title": "2014 47th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icebe/2016/6119/0/6119a092",
"title": "Predicting and Visualizing Consumer Sentiments in Online Social Media",
"doi": null,
"abstractUrl": "/proceedings-article/icebe/2016/6119a092/12OmNzUgd6o",
"parentPublication": {
"id": "proceedings/icebe/2016/6119/0",
"title": "2016 IEEE 13th International Conference on e-Business Engineering (ICEBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/is3c/2016/3071/0/3071a393",
"title": "Social Media Analytics Based Product Improvement Framework",
"doi": null,
"abstractUrl": "/proceedings-article/is3c/2016/3071a393/12OmNzcPAJK",
"parentPublication": {
"id": "proceedings/is3c/2016/3071/0",
"title": "2016 International Symposium on Computer, Consumer and Control (IS3C)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2018/5035/0/08622119",
"title": "Unsupervised domain-agnostic identification of product names in social media posts",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2018/08622119/17D45WgziSx",
"parentPublication": {
"id": "proceedings/big-data/2018/5035/0",
"title": "2018 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icime/2018/7616/0/761600a087",
"title": "The Research of User-Based Media Product Innovation in the Mobile Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icime/2018/761600a087/17D45XeKgtu",
"parentPublication": {
"id": "proceedings/icime/2018/7616/0",
"title": "2018 International Joint Conference on Information, Media and Engineering (ICIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10023999",
"articleId": "1K9ssyL8VvG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10024388",
"articleId": "1KaB9SqICWs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1K9ssyL8VvG",
"doi": "10.1109/TVCG.2023.3238478",
"abstract": "This paper introduces an interaction method allowing virtual reality (VR) users to interact with virtual objects by blowing air. The proposed method allows users to interact with virtual objects in a physically plausible way by recognizing the intensity of the wind generated by the user's actual wind blowing activity in the physical world. This is expected to provide immersed VR experience since it enables users to interact with virtual objects in the same way they do in the real world. Three experiments were carried out to develop and improve this method. In the first experiment, we collected the user's blowing data and used it to model a formula to estimate the speed of the wind from the sound waves obtained through a microphone. In the second experiment, we investigated how much gain can be applied to the formula obtained in the first experiment. The aim is to reduce the lung capacity required to generate wind without compromising physical plausibility. In the third experiment, the advantages and disadvantages of the proposed method compared to the controller-based method were investigated in two scenarios of blowing a ball and a pinwheel. According to the experimental results and participant interview, participants felt a stronger sense of presence and found the VR experience more fun with the proposed blowing interaction method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces an interaction method allowing virtual reality (VR) users to interact with virtual objects by blowing air. The proposed method allows users to interact with virtual objects in a physically plausible way by recognizing the intensity of the wind generated by the user's actual wind blowing activity in the physical world. This is expected to provide immersed VR experience since it enables users to interact with virtual objects in the same way they do in the real world. Three experiments were carried out to develop and improve this method. In the first experiment, we collected the user's blowing data and used it to model a formula to estimate the speed of the wind from the sound waves obtained through a microphone. In the second experiment, we investigated how much gain can be applied to the formula obtained in the first experiment. The aim is to reduce the lung capacity required to generate wind without compromising physical plausibility. In the third experiment, the advantages and disadvantages of the proposed method compared to the controller-based method were investigated in two scenarios of blowing a ball and a pinwheel. According to the experimental results and participant interview, participants felt a stronger sense of presence and found the VR experience more fun with the proposed blowing interaction method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces an interaction method allowing virtual reality (VR) users to interact with virtual objects by blowing air. The proposed method allows users to interact with virtual objects in a physically plausible way by recognizing the intensity of the wind generated by the user's actual wind blowing activity in the physical world. This is expected to provide immersed VR experience since it enables users to interact with virtual objects in the same way they do in the real world. Three experiments were carried out to develop and improve this method. In the first experiment, we collected the user's blowing data and used it to model a formula to estimate the speed of the wind from the sound waves obtained through a microphone. In the second experiment, we investigated how much gain can be applied to the formula obtained in the first experiment. The aim is to reduce the lung capacity required to generate wind without compromising physical plausibility. In the third experiment, the advantages and disadvantages of the proposed method compared to the controller-based method were investigated in two scenarios of blowing a ball and a pinwheel. According to the experimental results and participant interview, participants felt a stronger sense of presence and found the VR experience more fun with the proposed blowing interaction method.",
"title": "VR Blowing: A Physically Plausible Interaction Method for Blowing Air in Virtual Reality",
"normalizedTitle": "VR Blowing: A Physically Plausible Interaction Method for Blowing Air in Virtual Reality",
"fno": "10023999",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Microphones",
"Fluid Flow Measurement",
"Navigation",
"Usability",
"Force",
"Tracking",
"Mouth",
"Blowing Control",
"Human Computer Interaction",
"Immersion",
"Virtual Reality"
],
"authors": [
{
"givenName": "MinYeong",
"surname": "Seo",
"fullName": "MinYeong Seo",
"affiliation": "Department of Software Convergence, Kyung Hee University, Seoul, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "KyungEun",
"surname": "Kang",
"fullName": "KyungEun Kang",
"affiliation": "Department of Electronic Engineering and Software Convergence, Kyung Hee University, Seoul, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "HyeongYeop",
"surname": "Kang",
"fullName": "HyeongYeop Kang",
"affiliation": "Department of Software Convergence, Kyung Hee University, Seoul, South Korea",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2015/6886/0/07131774",
"title": "Cirque des bouteilles: The art of blowing on bottles",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131774/12OmNAkWvbr",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vsmm/2001/1402/0/14020499",
"title": "Development of VR Experiencing System with Hemi-Spherical Immersive Projection Display for Urban Environment Design",
"doi": null,
"abstractUrl": "/proceedings-article/vsmm/2001/14020499/12OmNB0X8uQ",
"parentPublication": {
"id": "proceedings/vsmm/2001/1402/0",
"title": "Proceedings Seventh International Conference on Virtual Systems and MultiMedia Enhanced Realities: Augmented and Unplugged",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcmp-ugc/2010/986/0/06017978",
"title": "Computational Modeling of Geometrically-Complex Weapons Bays",
"doi": null,
"abstractUrl": "/proceedings-article/hpcmp-ugc/2010/06017978/12OmNBp52Fp",
"parentPublication": {
"id": "proceedings/hpcmp-ugc/2010/986/0",
"title": "2010 DoD High Performance Computing Modernization Program Users Group Conference (HPCMP-UGC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2006/0225/0/02250143",
"title": "The VR Scooter: Wind and Tactile Feedback Improve User Performance",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2006/02250143/12OmNCgrD3N",
"parentPublication": {
"id": "proceedings/3dui/2006/0225/0",
"title": "3D User Interfaces (3DUI'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2015/7644/0/7644a634",
"title": "Performance Analysis of a Kind of Movable Wind-Drive Device",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2015/7644a634/12OmNvTBB6J",
"parentPublication": {
"id": "proceedings/icicta/2015/7644/0",
"title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/2002/1594/0/15940043",
"title": "Randomly Accessible Procedural Animation of Physically Approximate Turbulent Motion",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2002/15940043/12OmNxwWosc",
"parentPublication": {
"id": "proceedings/ca/2002/1594/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2017/3091/0/3091a001",
"title": "User Study of VR Basic Controller and Data Glove as Hand Gesture Inputs in VR Games",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2017/3091a001/12OmNyyeWrp",
"parentPublication": {
"id": "proceedings/isuvr/2017/3091/0",
"title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10066837",
"title": "VR-HandNet: A Visually and Physically Plausible Hand Manipulation System in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10066837/1LtR7JYxVEk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090454",
"title": "The other way: immersive VR storytelling through biking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090454/1jIxszQHffq",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a045",
"title": "Perception of Multisensory Wind Representation in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a045/1pysxBReola",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10023985",
"articleId": "1K9stqcyyQM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10024005",
"articleId": "1K9ss42cTAI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KaB9pHxHJC",
"name": "ttg555501-010023999s1-supp1-3238478.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010023999s1-supp1-3238478.mp4",
"extension": "mp4",
"size": "23.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1K9ss42cTAI",
"doi": "10.1109/TVCG.2023.3238662",
"abstract": "Most systems for simulating sound propagation in a virtual environment for interactive applications use ray- or path-based models of sound. With these models, the “early” (low-order) specular reflection paths play a key role in defining the “sound” of the environment. However, the wave nature of sound, and the fact that smooth objects are approximated by triangle meshes, pose challenges for creating realistic approximations of the reflection results. Existing methods which produce accurate results are too slow to be used in most interactive applications with dynamic scenes. This paper presents a method for reflections modeling called spatially sampled near-reflective diffraction (SSNRD), based on an existing approximate diffraction model, Volumetric Diffraction and Transmission (VDaT). The SSNRD model addresses the challenges mentioned above, produces results accurate to within 1-2 dB on average compared to edge diffraction, and is fast enough to generate thousands of paths in a few milliseconds in large scenes. This method encompasses scene geometry processing, path trajectory generation, spatial sampling for diffraction modeling, and a small deep neural network (DNN) to produce the final response of each path. All steps of the method are GPU-accelerated, and NVIDIA RTX real-time ray tracing hardware is used for spatial computing tasks beyond just traditional ray tracing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Most systems for simulating sound propagation in a virtual environment for interactive applications use ray- or path-based models of sound. With these models, the “early” (low-order) specular reflection paths play a key role in defining the “sound” of the environment. However, the wave nature of sound, and the fact that smooth objects are approximated by triangle meshes, pose challenges for creating realistic approximations of the reflection results. Existing methods which produce accurate results are too slow to be used in most interactive applications with dynamic scenes. This paper presents a method for reflections modeling called spatially sampled near-reflective diffraction (SSNRD), based on an existing approximate diffraction model, Volumetric Diffraction and Transmission (VDaT). The SSNRD model addresses the challenges mentioned above, produces results accurate to within 1-2 dB on average compared to edge diffraction, and is fast enough to generate thousands of paths in a few milliseconds in large scenes. This method encompasses scene geometry processing, path trajectory generation, spatial sampling for diffraction modeling, and a small deep neural network (DNN) to produce the final response of each path. All steps of the method are GPU-accelerated, and NVIDIA RTX real-time ray tracing hardware is used for spatial computing tasks beyond just traditional ray tracing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Most systems for simulating sound propagation in a virtual environment for interactive applications use ray- or path-based models of sound. With these models, the “early” (low-order) specular reflection paths play a key role in defining the “sound” of the environment. However, the wave nature of sound, and the fact that smooth objects are approximated by triangle meshes, pose challenges for creating realistic approximations of the reflection results. Existing methods which produce accurate results are too slow to be used in most interactive applications with dynamic scenes. This paper presents a method for reflections modeling called spatially sampled near-reflective diffraction (SSNRD), based on an existing approximate diffraction model, Volumetric Diffraction and Transmission (VDaT). The SSNRD model addresses the challenges mentioned above, produces results accurate to within 1-2 dB on average compared to edge diffraction, and is fast enough to generate thousands of paths in a few milliseconds in large scenes. This method encompasses scene geometry processing, path trajectory generation, spatial sampling for diffraction modeling, and a small deep neural network (DNN) to produce the final response of each path. All steps of the method are GPU-accelerated, and NVIDIA RTX real-time ray tracing hardware is used for spatial computing tasks beyond just traditional ray tracing.",
"title": "Specular Path Generation and Near-Reflective Diffraction in Interactive Acoustical Simulations",
"normalizedTitle": "Specular Path Generation and Near-Reflective Diffraction in Interactive Acoustical Simulations",
"fno": "10024005",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Diffraction",
"Reflection",
"Computational Modeling",
"Solid Modeling",
"Acoustics",
"Real Time Systems",
"Graphics Processing Units",
"Raytracing",
"Parallel Algorithms",
"Virtual Reality",
"Neural Nets",
"Graph And Tree Search Strategies"
],
"authors": [
{
"givenName": "Louis",
"surname": "Pisha",
"fullName": "Louis Pisha",
"affiliation": "Sonic Arts Research & Development, Qualcomm Institute, UC San Diego, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shahrokh",
"surname": "Yadegari",
"fullName": "Shahrokh Yadegari",
"affiliation": "Sonic Arts Research & Development, Qualcomm Institute, UC San Diego, CA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/gtsd/2016/3638/0/3638a161",
"title": "Phase Quantitative Computation for Multi-Phase Materials by Means of X-Ray Diffraction",
"doi": null,
"abstractUrl": "/proceedings-article/gtsd/2016/3638a161/12OmNBp52GE",
"parentPublication": {
"id": "proceedings/gtsd/2016/3638/0",
"title": "2016 3rd International Conference on Green Technology and Sustainable Development (GTSD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asmc/2003/7673/0/01194465",
"title": "Alignment and overlay metrology using a spectroscopic diffraction method",
"doi": null,
"abstractUrl": "/proceedings-article/asmc/2003/01194465/12OmNvA1hxz",
"parentPublication": {
"id": "proceedings/asmc/2003/7673/0",
"title": "IEEE/SEMI Advanced Semiconductor Manufacturing Conference and Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2016/2461/0/2461a493",
"title": "Modification of Dijkstra Proximity Matrix for Diffraction and Reflection Rays",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2016/2461a493/12OmNyiUBpb",
"parentPublication": {
"id": "proceedings/waina/2016/2461/0",
"title": "2016 30th International Conference on Advanced Information Networking and Applications Workshops (WAINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/1/81831472",
"title": "Sensitivity and resolution limit of pulse-echo diffraction tomography imaging system",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831472/12OmNyqzM2I",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08307458",
"title": "Diffraction Kernels for Interactive Sound Propagation in Dynamic Environments",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08307458/13rRUwh80Hk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09872132",
"title": "Towards Mixed-State Coded Diffraction Imaging",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09872132/1GhRKXiGLEA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2022/6124/0/612400a021",
"title": "Identifying Structural Properties of Proteins from X-ray Free Electron Laser Diffraction Patterns",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2022/612400a021/1J6hyFTJbJ6",
"parentPublication": {
"id": "proceedings/e-science/2022/6124/0",
"title": "2022 IEEE 18th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ai4s/2022/6207/0/620700a001",
"title": "Automated Continual Learning of Defect Identification in Coherent Diffraction Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/ai4s/2022/620700a001/1KnWGhFvE9W",
"parentPublication": {
"id": "proceedings/ai4s/2022/6207/0",
"title": "2022 IEEE/ACM International Workshop on Artificial Intelligence and Machine Learning for Scientific Applications (AI4S)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icapc/2022/6303/0/630300a337",
"title": "Probe design of reflective fiber optic hydrogen sensor",
"doi": null,
"abstractUrl": "/proceedings-article/icapc/2022/630300a337/1M7KXPskuxa",
"parentPublication": {
"id": "proceedings/icapc/2022/6303/0",
"title": "2022 International Conference on Applied Physics and Computing (ICAPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2020/6497/0/649700a049",
"title": "See Deeper: Identifying Crystal Structure from X-ray Diffraction Patterns",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2020/649700a049/1olHyV9ytpu",
"parentPublication": {
"id": "proceedings/cw/2020/6497/0",
"title": "2020 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10023999",
"articleId": "1K9ssyL8VvG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10024310",
"articleId": "1KaBaMU2Iog",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KkXilyTRGU",
"name": "ttg555501-010024005s1-tvcg-3238662-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010024005s1-tvcg-3238662-mm.zip",
"extension": "zip",
"size": "239 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KaBaMU2Iog",
"doi": "10.1109/TVCG.2023.3238989",
"abstract": "It is common to advise against using 3D to visualize abstract data such as networks, however Ware and Mitchell's 2008 study showed that path tracing in a network is less error prone in 3D than in 2D. It is unclear, however, if 3D retains its advantage when the 2D presentation of a network is improved using edge-routing, and when simple interaction techniques for exploring the network are available. We address this with two studies of path tracing under new conditions. The first study was preregistered, involved 34 users, and compared 2D and 3D layouts that the user could rotate and move in virtual reality with a handheld controller. Error rates were lower in 3D than in 2D, despite the use of edge-routing in 2D and the use of mouse-driven interactive highlighting of edges. The second study involved 12 users and investigated data physicalization, comparing 3D layouts in virtual reality versus physical 3D printouts of networks augmented with a Microsoft HoloLens headset. No difference was found in error rate, but users performed a variety of actions with their fingers in the physical condition which can inform new interaction techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "It is common to advise against using 3D to visualize abstract data such as networks, however Ware and Mitchell's 2008 study showed that path tracing in a network is less error prone in 3D than in 2D. It is unclear, however, if 3D retains its advantage when the 2D presentation of a network is improved using edge-routing, and when simple interaction techniques for exploring the network are available. We address this with two studies of path tracing under new conditions. The first study was preregistered, involved 34 users, and compared 2D and 3D layouts that the user could rotate and move in virtual reality with a handheld controller. Error rates were lower in 3D than in 2D, despite the use of edge-routing in 2D and the use of mouse-driven interactive highlighting of edges. The second study involved 12 users and investigated data physicalization, comparing 3D layouts in virtual reality versus physical 3D printouts of networks augmented with a Microsoft HoloLens headset. No difference was found in error rate, but users performed a variety of actions with their fingers in the physical condition which can inform new interaction techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "It is common to advise against using 3D to visualize abstract data such as networks, however Ware and Mitchell's 2008 study showed that path tracing in a network is less error prone in 3D than in 2D. It is unclear, however, if 3D retains its advantage when the 2D presentation of a network is improved using edge-routing, and when simple interaction techniques for exploring the network are available. We address this with two studies of path tracing under new conditions. The first study was preregistered, involved 34 users, and compared 2D and 3D layouts that the user could rotate and move in virtual reality with a handheld controller. Error rates were lower in 3D than in 2D, despite the use of edge-routing in 2D and the use of mouse-driven interactive highlighting of edges. The second study involved 12 users and investigated data physicalization, comparing 3D layouts in virtual reality versus physical 3D printouts of networks augmented with a Microsoft HoloLens headset. No difference was found in error rate, but users performed a variety of actions with their fingers in the physical condition which can inform new interaction techniques.",
"title": "Path Tracing in 2D, 3D, and Physicalized Networks",
"normalizedTitle": "Path Tracing in 2D, 3D, and Physicalized Networks",
"fno": "10024310",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Layout",
"Task Analysis",
"Headphones",
"Data Visualization",
"Mice",
"Visualization",
"Graph Visualization",
"3 D Printing",
"Augmented Reality",
"Data Physicalization",
"Tangible",
"Path Following",
"Path Finding"
],
"authors": [
{
"givenName": "Michael J.",
"surname": "McGuffin",
"fullName": "Michael J. McGuffin",
"affiliation": "École de technologie supérieure, Montreal, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ryan",
"surname": "Servera",
"fullName": "Ryan Servera",
"affiliation": "École de technologie supérieure, Montreal, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marie",
"surname": "Forest",
"fullName": "Marie Forest",
"affiliation": "École de technologie supérieure, Montreal, Canada",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2005/2766/0/01532837",
"title": "Eyegaze analysis of displays with combined 2D and 3D views",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532837/12OmNBvkdk6",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457b942",
"title": "Generating Holistic 3D Scene Abstractions for Text-Based Image Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b942/12OmNvoWV1x",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131755",
"title": "Mapping 2D input to 3D immersive spatial augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131755/12OmNwAKCNT",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dvis/2014/6826/0/07160095",
"title": "Beyond the classical monoscopic 3D in graph analytics: An experimental study of the impact of stereoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/3dvis/2014/07160095/12OmNxwENmo",
"parentPublication": {
"id": "proceedings/3dvis/2014/6826/0",
"title": "2014 IEEE VIS International Workshop on 3DVis (3DVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2015/7343/0/07314295",
"title": "Hybrid-Dimensional Visualization and Interaction - Integrating 2D and 3D Visualization with Semi-Immersive Navigation Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2015/07314295/12OmNzBOi7E",
"parentPublication": {
"id": "proceedings/bdva/2015/7343/0",
"title": "2015 Big Data Visual Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sose/2017/6320/0/07943298",
"title": "A 2D and 3D Indoor Mapping Approach for Virtual Navigation Services",
"doi": null,
"abstractUrl": "/proceedings-article/sose/2017/07943298/12OmNzcxZuL",
"parentPublication": {
"id": "proceedings/sose/2017/6320/0",
"title": "2017 11th IEEE Symposium on Service-Oriented System Engineering (SOSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/01/06826569",
"title": "The Impact of Interactivity on Comprehending 2D and 3D Visualizations of Movement Data",
"doi": null,
"abstractUrl": "/journal/tg/2015/01/06826569/13rRUyYjKah",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200b836",
"title": "SAT: 2D Semantics Assisted Training for 3D Visual Grounding",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200b836/1BmEKYN6gbS",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200k0913",
"title": "3D-FRONT: 3D Furnished Rooms with layOuts and semaNTics",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200k0913/1BmEzQDqGek",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/04/08906136",
"title": "Global Beautification of 2D and 3D Layouts With Interactive Ambiguity Resolution",
"doi": null,
"abstractUrl": "/journal/tg/2021/04/08906136/1f5qMIjZR5K",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10024005",
"articleId": "1K9ss42cTAI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10024360",
"articleId": "1KaBabqZxSg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KcgZ6aRZLO",
"name": "ttg555501-010024310s1-supp1-3238989.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010024310s1-supp1-3238989.pdf",
"extension": "pdf",
"size": "15.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KaBabqZxSg",
"doi": "10.1109/TVCG.2023.3238821",
"abstract": "In this paper, we propose the t-FDP model, a force-directed placement method based on a novel bounded short-range force (t-force) defined by Student’s t-distribution. Our formulation is flexible, exerts limited repulsive forces for nearby nodes and can be adapted separately in its short- and long-range effects. Using such forces in force-directed graph layouts yields better neighborhood preservation than current methods, while maintaining low stress errors. Our efficient implementation using a Fast Fourier Transform is one order of magnitude faster than state-of-the-art methods and two orders faster on the GPU, enabling us to perform parameter tuning by globally and locally adjusting the t-force in real-time for complex graphs. We demonstrate the quality of our approach by numerical evaluation against state-of-the-art approaches and extensions for interactive exploration.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose the t-FDP model, a force-directed placement method based on a novel bounded short-range force (t-force) defined by Student’s t-distribution. Our formulation is flexible, exerts limited repulsive forces for nearby nodes and can be adapted separately in its short- and long-range effects. Using such forces in force-directed graph layouts yields better neighborhood preservation than current methods, while maintaining low stress errors. Our efficient implementation using a Fast Fourier Transform is one order of magnitude faster than state-of-the-art methods and two orders faster on the GPU, enabling us to perform parameter tuning by globally and locally adjusting the t-force in real-time for complex graphs. We demonstrate the quality of our approach by numerical evaluation against state-of-the-art approaches and extensions for interactive exploration.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose the t-FDP model, a force-directed placement method based on a novel bounded short-range force (t-force) defined by Student’s t-distribution. Our formulation is flexible, exerts limited repulsive forces for nearby nodes and can be adapted separately in its short- and long-range effects. Using such forces in force-directed graph layouts yields better neighborhood preservation than current methods, while maintaining low stress errors. Our efficient implementation using a Fast Fourier Transform is one order of magnitude faster than state-of-the-art methods and two orders faster on the GPU, enabling us to perform parameter tuning by globally and locally adjusting the t-force in real-time for complex graphs. We demonstrate the quality of our approach by numerical evaluation against state-of-the-art approaches and extensions for interactive exploration.",
"title": "Force-Directed Graph Layouts Revisited: A New Force Based on the T-Distribution",
"normalizedTitle": "Force-Directed Graph Layouts Revisited: A New Force Based on the T-Distribution",
"fno": "10024360",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Layout",
"Computational Modeling",
"Force",
"Stress",
"Springs",
"Scalability",
"Graphics Processing Units",
"FFT",
"Force Directed Placement",
"Graph Layout",
"Students T Distribution"
],
"authors": [
{
"givenName": "Fahai",
"surname": "Zhong",
"fullName": "Fahai Zhong",
"affiliation": "Department of Computer Science, Shandong University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mingliang",
"surname": "Xue",
"fullName": "Mingliang Xue",
"affiliation": "Department of Computer Science, Shandong University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Zhang",
"fullName": "Jian Zhang",
"affiliation": "Computer Network Information Center, Chinese Academy of Sciences, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fan",
"surname": "Zhang",
"fullName": "Fan Zhang",
"affiliation": "School of Computer Science and Technology, SDTBU, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rui",
"surname": "Ban",
"fullName": "Rui Ban",
"affiliation": "Intelligent Network Design Institute, CITC, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Oliver",
"surname": "Deussen",
"fullName": "Oliver Deussen",
"affiliation": "Computer and Information Science, University of Konstanz, Konstanz, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yunhai",
"surname": "Wang",
"fullName": "Yunhai Wang",
"affiliation": "Department of Computer Science, Shandong University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/date/2005/2288/2/228820902",
"title": "An Improved Multi-Level Framework for Force-Directed Placement",
"doi": null,
"abstractUrl": "/proceedings-article/date/2005/228820902/12OmNAqkSHJ",
"parentPublication": {
"id": "proceedings/date/2005/2288/2",
"title": "Design, Automation & Test in Europe Conference & Exhibition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccd/1997/8206/0/82060752",
"title": "Dynamic bounding of successor force computations in the force directed list scheduling algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iccd/1997/82060752/12OmNCfSqQc",
"parentPublication": {
"id": "proceedings/iccd/1997/8206/0",
"title": "Proceedings International Conference on Computer Design VLSI in Computers and Processors",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispdc/2012/2599/0/06341510",
"title": "Scalable Force Directed Graph Layout Algorithms Using Fast Multipole Methods",
"doi": null,
"abstractUrl": "/proceedings-article/ispdc/2012/06341510/12OmNx3HI8B",
"parentPublication": {
"id": "proceedings/ispdc/2012/2599/0",
"title": "2012 11th International Symposium on Parallel and Distributed Computing (ISPDC 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2012/4771/0/4771a454",
"title": "A Multilevel Force-directed Graph Drawing Algorithm Using Multilevel Global Force Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2012/4771a454/12OmNy2Jt2D",
"parentPublication": {
"id": "proceedings/iv/2012/4771/0",
"title": "2012 16th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/04/v0536",
"title": "Drawing Directed Graphs Using Quadratic Programming",
"doi": null,
"abstractUrl": "/journal/tg/2006/04/v0536/13rRUxZRbnT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/topoinvis/2022/9354/0/935400a081",
"title": "Untangling Force-Directed Layouts Using Persistent Homology",
"doi": null,
"abstractUrl": "/proceedings-article/topoinvis/2022/935400a081/1J2XKiZs7xS",
"parentPublication": {
"id": "proceedings/topoinvis/2022/9354/0",
"title": "2022 Topological Data Analysis and Visualization (TopoInVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10005087",
"title": "SubLinearForce: Fully Sublinear-Time Force Computation for Large Complex Graph Drawing",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10005087/1JC5yDf0E5q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807379",
"title": "Persistent Homology Guided Force-Directed Graph Layouts",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807379/1cG6h8OkgJq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2019/2605/0/08944364",
"title": "Force-Directed Graph Layouts by Edge Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2019/08944364/1grOFicLl9S",
"parentPublication": {
"id": "proceedings/ldav/2019/2605/0",
"title": "2019 IEEE 9th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a096",
"title": "Accelerating Force-Directed Graph Drawing with RT Cores",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a096/1qROE1kZkek",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10024310",
"articleId": "1KaBaMU2Iog",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10024388",
"articleId": "1KaB9SqICWs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KcgXK34IGk",
"name": "ttg555501-010024360s1-supp2-3238821.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010024360s1-supp2-3238821.mp4",
"extension": "mp4",
"size": "83.9 MB",
"__typename": "WebExtraType"
},
{
"id": "1KcgYxZCOE8",
"name": "ttg555501-010024360s1-supp1-3238821.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010024360s1-supp1-3238821.pdf",
"extension": "pdf",
"size": "56.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KaB9SqICWs",
"doi": "10.1109/TVCG.2023.3238909",
"abstract": "Node-link diagrams are widely used to visualize graphs. Most graph layout algorithms only use graph topology for aesthetic goals (e.g., minimize node occlusions and edge crossings) or use node attributes for exploration goals (e.g., preserve visible communities). Existing hybrid methods that bind the two perspectives still suffer from various generation restrictions (e.g., limited input types and required manual adjustments and prior knowledge of graphs) and the imbalance between aesthetic and exploration goals. In this paper, we propose a flexible embedding-based graph exploration pipeline to enjoy the best of both graph topology and node attributes. First, we leverage embedding algorithms for attributed graphs to encode the two perspectives into latent space. Then, we present an embedding-driven graph layout algorithm, GEGraph, which can achieve aesthetic layouts with better community preservation to support an easy interpretation of the graph structure. Next, graph explorations are extended based on the generated graph layout and insights extracted from the embedding vectors. Illustrated with examples, we build a layout-preserving aggregation method with Focus+Context interaction and a related nodes searching approach with multiple proximity strategies. Finally, we conduct quantitative and qualitative evaluations, a user study, and two case studies to validate our approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Node-link diagrams are widely used to visualize graphs. Most graph layout algorithms only use graph topology for aesthetic goals (e.g., minimize node occlusions and edge crossings) or use node attributes for exploration goals (e.g., preserve visible communities). Existing hybrid methods that bind the two perspectives still suffer from various generation restrictions (e.g., limited input types and required manual adjustments and prior knowledge of graphs) and the imbalance between aesthetic and exploration goals. In this paper, we propose a flexible embedding-based graph exploration pipeline to enjoy the best of both graph topology and node attributes. First, we leverage embedding algorithms for attributed graphs to encode the two perspectives into latent space. Then, we present an embedding-driven graph layout algorithm, GEGraph, which can achieve aesthetic layouts with better community preservation to support an easy interpretation of the graph structure. Next, graph explorations are extended based on the generated graph layout and insights extracted from the embedding vectors. Illustrated with examples, we build a layout-preserving aggregation method with Focus+Context interaction and a related nodes searching approach with multiple proximity strategies. Finally, we conduct quantitative and qualitative evaluations, a user study, and two case studies to validate our approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Node-link diagrams are widely used to visualize graphs. Most graph layout algorithms only use graph topology for aesthetic goals (e.g., minimize node occlusions and edge crossings) or use node attributes for exploration goals (e.g., preserve visible communities). Existing hybrid methods that bind the two perspectives still suffer from various generation restrictions (e.g., limited input types and required manual adjustments and prior knowledge of graphs) and the imbalance between aesthetic and exploration goals. In this paper, we propose a flexible embedding-based graph exploration pipeline to enjoy the best of both graph topology and node attributes. First, we leverage embedding algorithms for attributed graphs to encode the two perspectives into latent space. Then, we present an embedding-driven graph layout algorithm, GEGraph, which can achieve aesthetic layouts with better community preservation to support an easy interpretation of the graph structure. Next, graph explorations are extended based on the generated graph layout and insights extracted from the embedding vectors. Illustrated with examples, we build a layout-preserving aggregation method with Focus+Context interaction and a related nodes searching approach with multiple proximity strategies. Finally, we conduct quantitative and qualitative evaluations, a user study, and two case studies to validate our approach.",
"title": "Graph Exploration with Embedding-Guided Layouts",
"normalizedTitle": "Graph Exploration with Embedding-Guided Layouts",
"fno": "10024388",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Layout",
"Topology",
"Pipelines",
"Feature Extraction",
"Network Topology",
"Clustering Algorithms",
"Hybrid Power Systems",
"Graph Embedding",
"Graph Layout",
"Graph Exploration"
],
"authors": [
{
"givenName": "Leixian",
"surname": "Shen",
"fullName": "Leixian Shen",
"affiliation": "Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhiwei",
"surname": "Tai",
"fullName": "Zhiwei Tai",
"affiliation": "Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Enya",
"surname": "Shen",
"fullName": "Enya Shen",
"affiliation": "Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jianmin",
"surname": "Wang",
"fullName": "Jianmin Wang",
"affiliation": "Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2008/06/ttg2008061301",
"title": "Rapid Graph Layout Using Space Filling Curves",
"doi": null,
"abstractUrl": "/journal/tg/2008/06/ttg2008061301/13rRUx0xPIx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/06/ttg2008061333",
"title": "Perceptual Organization in User-Generated Graph Layouts",
"doi": null,
"abstractUrl": "/journal/tg/2008/06/ttg2008061333/13rRUyeCkac",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192724",
"title": "AmbiguityVis: Visualization of Ambiguity in Graph Layouts",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192724/13rRUyuegpa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/04/09868157",
"title": "Unsupervised Graph Embedding via Adaptive Graph Learning",
"doi": null,
"abstractUrl": "/journal/tp/2023/04/09868157/1G9WoQwVOHm",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/04/08739137",
"title": "Evaluating the Readability of Force Directed Graph Layouts: A Deep Learning Approach",
"doi": null,
"abstractUrl": "/magazine/cg/2019/04/08739137/1aXM6mNkouI",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807379",
"title": "Persistent Homology Guided Force-Directed Graph Layouts",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807379/1cG6h8OkgJq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300e521",
"title": "Neural Turtle Graphics for Modeling City Road Layouts",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300e521/1hVleMGTxPW",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2020/8316/0/831600a442",
"title": "Force2Vec: Parallel Force-Directed Graph Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2020/831600a442/1r54GQx8BUc",
"parentPublication": {
"id": "proceedings/icdm/2020/8316/0",
"title": "2020 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/12/09392297",
"title": "Walking With Attention: Self-Guided Walking for Heterogeneous Graph Embedding",
"doi": null,
"abstractUrl": "/journal/tk/2022/12/09392297/1sq7pCMZtyU",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdia/2020/2232/0/223200a125",
"title": "Graph Layout Based on Network Embedding and Improved Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/bigdia/2020/223200a125/1stvA7W3Ota",
"parentPublication": {
"id": "proceedings/bigdia/2020/2232/0",
"title": "2020 6th International Conference on Big Data and Information Analytics (BigDIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10023985",
"articleId": "1K9stqcyyQM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10024360",
"articleId": "1KaBabqZxSg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1K3XDZ8pUAg",
"doi": "10.1109/TVCG.2023.3237768",
"abstract": "Electronic transitions in molecules due to the absorption or emission of light is a complex quantum mechanical process. Their study plays an important role in the design of novel materials. A common yet challenging task in the study is to determine the nature of electronic transitions, namely which subgroups of the molecule are involved in the transition by donating or accepting electrons, followed by an investigation of the variation in the donor-acceptor behavior for different transitions or conformations of the molecules. In this paper, we present a novel approach for the analysis of a bivariate field and show its applicability to the study of electronic transitions. This approach is based on two novel operators, the continuous scatterplot (CSP) lens operator and the CSP peel operator, that enable effective visual analysis of bivariate fields. Both operators can be applied independently or together to facilitate analysis. The operators motivate the design of control polygon inputs to extract fiber surfaces of interest in the spatial domain. The CSPs are annotated with a quantitative measure to further support the visual analysis. We study different molecular systems and demonstrate how the CSP peel and CSP lens operators help identify and study donor and acceptor characteristics in molecular systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Electronic transitions in molecules due to the absorption or emission of light is a complex quantum mechanical process. Their study plays an important role in the design of novel materials. A common yet challenging task in the study is to determine the nature of electronic transitions, namely which subgroups of the molecule are involved in the transition by donating or accepting electrons, followed by an investigation of the variation in the donor-acceptor behavior for different transitions or conformations of the molecules. In this paper, we present a novel approach for the analysis of a bivariate field and show its applicability to the study of electronic transitions. This approach is based on two novel operators, the continuous scatterplot (CSP) lens operator and the CSP peel operator, that enable effective visual analysis of bivariate fields. Both operators can be applied independently or together to facilitate analysis. The operators motivate the design of control polygon inputs to extract fiber surfaces of interest in the spatial domain. The CSPs are annotated with a quantitative measure to further support the visual analysis. We study different molecular systems and demonstrate how the CSP peel and CSP lens operators help identify and study donor and acceptor characteristics in molecular systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Electronic transitions in molecules due to the absorption or emission of light is a complex quantum mechanical process. Their study plays an important role in the design of novel materials. A common yet challenging task in the study is to determine the nature of electronic transitions, namely which subgroups of the molecule are involved in the transition by donating or accepting electrons, followed by an investigation of the variation in the donor-acceptor behavior for different transitions or conformations of the molecules. In this paper, we present a novel approach for the analysis of a bivariate field and show its applicability to the study of electronic transitions. This approach is based on two novel operators, the continuous scatterplot (CSP) lens operator and the CSP peel operator, that enable effective visual analysis of bivariate fields. Both operators can be applied independently or together to facilitate analysis. The operators motivate the design of control polygon inputs to extract fiber surfaces of interest in the spatial domain. The CSPs are annotated with a quantitative measure to further support the visual analysis. We study different molecular systems and demonstrate how the CSP peel and CSP lens operators help identify and study donor and acceptor characteristics in molecular systems.",
"title": "Continuous Scatterplot Operators for Bivariate Analysis and Study of Electronic Transitions",
"normalizedTitle": "Continuous Scatterplot Operators for Bivariate Analysis and Study of Electronic Transitions",
"fno": "10021888",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Orbits",
"Lenses",
"Pipelines",
"Isosurfaces",
"Space Vehicles",
"Behavioral Sciences",
"Bivariate Field Analysis",
"Continuous Scatterplot",
"Fiber Surface",
"Control Polygon",
"Visual Analysis",
"Electronic Transitions"
],
"authors": [
{
"givenName": "Mohit",
"surname": "Sharma",
"fullName": "Mohit Sharma",
"affiliation": "Department of Computer Science and Automation, Indian Institute of Science, Bangalore, India",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Talha Bin",
"surname": "Masood",
"fullName": "Talha Bin Masood",
"affiliation": "Department of Science and Technology (ITN), Linköping University, Norrköping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Signe S.",
"surname": "Thygesen",
"fullName": "Signe S. Thygesen",
"affiliation": "Department of Science and Technology (ITN), Linköping University, Norrköping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mathieu",
"surname": "Linares",
"fullName": "Mathieu Linares",
"affiliation": "Department of Science and Technology (ITN), Linköping University, Norrköping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ingrid",
"surname": "Hotz",
"fullName": "Ingrid Hotz",
"affiliation": "Department of Science and Technology (ITN), Linköping University, Norrköping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Vijay",
"surname": "Natarajan",
"fullName": "Vijay Natarajan",
"affiliation": "Department of Computer Science and Automation, Indian Institute of Science, Bangalore, India",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vis/2021/3335/0/333500a096",
"title": "Segmentation Driven Peeling for Visual Analysis of Electronic Transitions",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a096/1yXubIDXHuU",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10021892",
"articleId": "1K3XDAtRZ8Q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10024005",
"articleId": "1K9ss42cTAI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1K9stcUx2fu",
"name": "ttg555501-010021888s1-supp1-3237768.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010021888s1-supp1-3237768.pdf",
"extension": "pdf",
"size": "99.1 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1K3XC5MZdGE",
"doi": "10.1109/TVCG.2023.3238309",
"abstract": "The use of Augmented Reality (AR) for navigation purposes has shown beneficial in assisting physicians during the performance of surgical procedures. These applications commonly require knowing the pose of surgical tools and patients to provide visual information that surgeons can use during the performance of the task. Existing medical-grade tracking systems use infrared cameras placed inside the Operating Room (OR) to identify retro-reflective markers attached to objects of interest and compute their pose. Some commercially available AR Head-Mounted Displays (HMDs) use similar cameras for self-localization, hand tracking, and estimating the objects' depth. This work presents a framework that uses the built-in cameras of AR HMDs to enable accurate tracking of retro-reflective markers without the need to integrate any additional electronics into the HMD. The proposed framework can simultaneously track multiple tools without having previous knowledge of their geometry and only requires establishing a local network between the headset and a workstation. Our results show that the tracking and detection of the markers can be achieved with an accuracy of <inline-formula><tex-math notation=\"LaTeX\">Z_$0.09\\pm 0.06\\ mm$_Z</tex-math></inline-formula> on lateral translation, <inline-formula><tex-math notation=\"LaTeX\">Z_$0.42 \\pm 0.32\\ mm$_Z</tex-math></inline-formula> on longitudinal translation and <inline-formula><tex-math notation=\"LaTeX\">Z_$0.80 \\pm 0.39^\\circ$_Z</tex-math></inline-formula> for rotations around the vertical axis. Furthermore, to showcase the relevance of the proposed framework, we evaluate the system's performance in the context of surgical procedures. This use case was designed to replicate the scenarios of k-wire insertions in orthopedic procedures. For evaluation, seven surgeons were provided with visual navigation and asked to perform 24 injections using the proposed framework. 
A second study with ten participants served to investigate the capabilities of the framework in the context of more general scenarios. Results from these studies provided comparable accuracy to those reported in the literature for AR-based navigation procedures.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The use of Augmented Reality (AR) for navigation purposes has shown beneficial in assisting physicians during the performance of surgical procedures. These applications commonly require knowing the pose of surgical tools and patients to provide visual information that surgeons can use during the performance of the task. Existing medical-grade tracking systems use infrared cameras placed inside the Operating Room (OR) to identify retro-reflective markers attached to objects of interest and compute their pose. Some commercially available AR Head-Mounted Displays (HMDs) use similar cameras for self-localization, hand tracking, and estimating the objects' depth. This work presents a framework that uses the built-in cameras of AR HMDs to enable accurate tracking of retro-reflective markers without the need to integrate any additional electronics into the HMD. The proposed framework can simultaneously track multiple tools without having previous knowledge of their geometry and only requires establishing a local network between the headset and a workstation. Our results show that the tracking and detection of the markers can be achieved with an accuracy of <inline-formula><tex-math notation=\"LaTeX\">$0.09\\pm 0.06\\ mm$</tex-math></inline-formula> on lateral translation, <inline-formula><tex-math notation=\"LaTeX\">$0.42 \\pm 0.32\\ mm$</tex-math></inline-formula> on longitudinal translation and <inline-formula><tex-math notation=\"LaTeX\">$0.80 \\pm 0.39^\\circ$</tex-math></inline-formula> for rotations around the vertical axis. Furthermore, to showcase the relevance of the proposed framework, we evaluate the system's performance in the context of surgical procedures. This use case was designed to replicate the scenarios of k-wire insertions in orthopedic procedures. For evaluation, seven surgeons were provided with visual navigation and asked to perform 24 injections using the proposed framework. 
A second study with ten participants served to investigate the capabilities of the framework in the context of more general scenarios. Results from these studies provided comparable accuracy to those reported in the literature for AR-based navigation procedures.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The use of Augmented Reality (AR) for navigation purposes has shown beneficial in assisting physicians during the performance of surgical procedures. These applications commonly require knowing the pose of surgical tools and patients to provide visual information that surgeons can use during the performance of the task. Existing medical-grade tracking systems use infrared cameras placed inside the Operating Room (OR) to identify retro-reflective markers attached to objects of interest and compute their pose. Some commercially available AR Head-Mounted Displays (HMDs) use similar cameras for self-localization, hand tracking, and estimating the objects' depth. This work presents a framework that uses the built-in cameras of AR HMDs to enable accurate tracking of retro-reflective markers without the need to integrate any additional electronics into the HMD. The proposed framework can simultaneously track multiple tools without having previous knowledge of their geometry and only requires establishing a local network between the headset and a workstation. Our results show that the tracking and detection of the markers can be achieved with an accuracy of - on lateral translation, - on longitudinal translation and - for rotations around the vertical axis. Furthermore, to showcase the relevance of the proposed framework, we evaluate the system's performance in the context of surgical procedures. This use case was designed to replicate the scenarios of k-wire insertions in orthopedic procedures. For evaluation, seven surgeons were provided with visual navigation and asked to perform 24 injections using the proposed framework. A second study with ten participants served to investigate the capabilities of the framework in the context of more general scenarios. Results from these studies provided comparable accuracy to those reported in the literature for AR-based navigation procedures.",
"title": "STTAR: Surgical Tool Tracking using Off-the-Shelf Augmented Reality Head-Mounted Displays",
"normalizedTitle": "STTAR: Surgical Tool Tracking using Off-the-Shelf Augmented Reality Head-Mounted Displays",
"fno": "10021890",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Surgery",
"Navigation",
"Biomedical Imaging",
"Visualization",
"Resists",
"Task Analysis",
"Augmented Reality",
"Computer Assisted Medical Procedures",
"Navigation",
"Tracking"
],
"authors": [
{
"givenName": "Alejandro",
"surname": "Martin-Gomez",
"fullName": "Alejandro Martin-Gomez",
"affiliation": "Laboratory for Computational Sensing and Robotics, Whiting School of Engineering, Johns Hopkins University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haowei",
"surname": "Li",
"fullName": "Haowei Li",
"affiliation": "Department of Biomedical Engineering, Tsinghua University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tianyu",
"surname": "Song",
"fullName": "Tianyu Song",
"affiliation": "Chair for Computer Aided Medical Procedures and Augmented Reality, Department of Informatics, Technical University of Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sheng",
"surname": "Yang",
"fullName": "Sheng Yang",
"affiliation": "Department of Biomedical Engineering, Tsinghua University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guangzhi",
"surname": "Wang",
"fullName": "Guangzhi Wang",
"affiliation": "Department of Biomedical Engineering, Tsinghua University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hui",
"surname": "Ding",
"fullName": "Hui Ding",
"affiliation": "Department of Biomedical Engineering, Tsinghua University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nassir",
"surname": "Navab",
"fullName": "Nassir Navab",
"affiliation": "Laboratory for Computational Sensing and Robotics, Whiting School of Engineering, Johns Hopkins University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhe",
"surname": "Zhao",
"fullName": "Zhe Zhao",
"affiliation": "Department of Orthopaedics, Beijing Tsinghua Changgung Hospital. School of Clinical Medicine, Tsinghua University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mehran",
"surname": "Armand",
"fullName": "Mehran Armand",
"affiliation": "Laboratory for Computational Sensing and Robotics, Whiting School of Engineering, Johns Hopkins University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/var4good/2018/5977/0/08576884",
"title": "Augmented Visual Instruction for Surgical Practice and Training",
"doi": null,
"abstractUrl": "/proceedings-article/var4good/2018/08576884/17D45WODasn",
"parentPublication": {
"id": "proceedings/var4good/2018/5977/0",
"title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/5555/01/09933877",
"title": "Matrix-Based Secret Sharing for Reversible Data Hiding in Encrypted Images",
"doi": null,
"abstractUrl": "/journal/tq/5555/01/09933877/1HWLN6aNgDS",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09965747",
"title": "<inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathcal {X}$_Z</tex-math></inline-formula>-Metric: An N-Dimensional Information-Theoretic Framework for Groupwise Registration and Deep Combined Computing",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09965747/1IHMPhf3uW4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2022/8487/0/848700a164",
"title": "A Practical AR-based Surgical Navigation System Using Optical See-through Head Mounted Display",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2022/848700a164/1J6hB8fdg1W",
"parentPublication": {
"id": "proceedings/bibe/2022/8487/0",
"title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10077744",
"title": "A Comparative Evaluation of Optical See-through Augmented Reality in Surgical Guidance",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10077744/1LH8EZ3NEGI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2020/02/08730423",
"title": "Predicting Carbon Spectrum in Heteronuclear Single Quantum Coherence Spectroscopy for Online Feedback During Surgery",
"doi": null,
"abstractUrl": "/journal/tb/2020/02/08730423/1aAwyubtkha",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/02/09122448",
"title": "ManifoldNet: A Deep Neural Network for Manifold-Valued Data With Applications",
"doi": null,
"abstractUrl": "/journal/tp/2022/02/09122448/1kRRwHRZ1Li",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2021/10/09194366",
"title": "A Novel Measurement for Network Reliability",
"doi": null,
"abstractUrl": "/journal/tc/2021/10/09194366/1n0EqDZV3X2",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09253561",
"title": "AR-Loupe: Magnified Augmented Reality by Combining an Optical See-Through Head-Mounted Display and a Loupe",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09253561/1oDXHeBJHNe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/01/09422157",
"title": "On Efficient Large Maximal Biplex Discovery",
"doi": null,
"abstractUrl": "/journal/tk/2023/01/09422157/1tiTooWy0gg",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10018537",
"articleId": "1K0DFSXIg5W",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10021892",
"articleId": "1K3XDAtRZ8Q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1K3XDAtRZ8Q",
"doi": "10.1109/TVCG.2023.3238008",
"abstract": "This paper introduces an efficient algorithm for persistence diagram computation, given an input piecewise linear scalar field <inline-formula><tex-math notation=\"LaTeX\">Z_$f$_Z</tex-math></inline-formula> defined on a <inline-formula><tex-math notation=\"LaTeX\">Z_$d$_Z</tex-math></inline-formula>-dimensional simplicial complex <inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathcal {K}$_Z</tex-math></inline-formula>, with <inline-formula><tex-math notation=\"LaTeX\">Z_$d \\leq 3$_Z</tex-math></inline-formula>. Our work revisits the seminal algorithm <italic>“PairSimplices”</italic> [31], [103] with discrete Morse theory (DMT) [34], [80], which greatly reduces the number of input simplices to consider. Further, we also extend to DMT and accelerate the stratification strategy described in <italic>“PairSimplices”</italic> [31], [103] for the fast computation of the <inline-formula><tex-math notation=\"LaTeX\">Z_$0^{th}$_Z</tex-math></inline-formula> and <inline-formula><tex-math notation=\"LaTeX\">Z_$(d-1)^{th}$_Z</tex-math></inline-formula> diagrams, noted <inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathcal {D}_{0}(f)$_Z</tex-math></inline-formula> and <inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathcal {D}_{d-1}(f)$_Z</tex-math></inline-formula>. Minima-saddle persistence pairs (<inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathcal {D}_{0}(f)$_Z</tex-math></inline-formula>) and saddle-maximum persistence pairs (<inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathcal {D}_{d-1}(f)$_Z</tex-math></inline-formula>) are efficiently computed by processing , with a Union-Find , the unstable sets of 1-saddles and the stable sets of <inline-formula><tex-math notation=\"LaTeX\">Z_$(d-1)$_Z</tex-math></inline-formula>-saddles. 
We provide a detailed description of the (optional) handling of the boundary component of <inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathcal {K}$_Z</tex-math></inline-formula> when processing <inline-formula><tex-math notation=\"LaTeX\">Z_$(d-1)$_Z</tex-math></inline-formula>-saddles. This fast pre-computation for the dimensions 0 and <inline-formula><tex-math notation=\"LaTeX\">Z_$(d-1)$_Z</tex-math></inline-formula> enables an aggressive specialization of [4] to the 3D case, which results in a drastic reduction of the number of input simplices for the computation of <inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathcal {D}_{1}(f)$_Z</tex-math></inline-formula>, the intermediate layer of the <italic>sandwich</italic>. Finally, we document several performance improvements via shared-memory parallelism. We provide an open-source implementation of our algorithm for reproducibility purposes. We also contribute a reproducible benchmark package, which exploits three-dimensional data from a public repository and compares our algorithm to a variety of publicly available implementations. Extensive experiments indicate that our algorithm improves by two orders of magnitude the time performance of the seminal <italic>“PairSimplices”</italic> algorithm it extends. Moreover, it also improves memory footprint and time performance over a selection of 14 competing approaches, with a substantial gain over the fastest available approaches, while producing a strictly identical output. We illustrate the utility of our contributions with an application to the fast and robust extraction of persistent 1-dimensional generators on surfaces, volume data and high-dimensional point clouds.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces an efficient algorithm for persistence diagram computation, given an input piecewise linear scalar field <inline-formula><tex-math notation=\"LaTeX\">$f$</tex-math></inline-formula> defined on a <inline-formula><tex-math notation=\"LaTeX\">$d$</tex-math></inline-formula>-dimensional simplicial complex <inline-formula><tex-math notation=\"LaTeX\">$\\mathcal {K}$</tex-math></inline-formula>, with <inline-formula><tex-math notation=\"LaTeX\">$d \\leq 3$</tex-math></inline-formula>. Our work revisits the seminal algorithm <italic>“PairSimplices”</italic> [31], [103] with discrete Morse theory (DMT) [34], [80], which greatly reduces the number of input simplices to consider. Further, we also extend to DMT and accelerate the stratification strategy described in <italic>“PairSimplices”</italic> [31], [103] for the fast computation of the <inline-formula><tex-math notation=\"LaTeX\">$0^{th}$</tex-math></inline-formula> and <inline-formula><tex-math notation=\"LaTeX\">$(d-1)^{th}$</tex-math></inline-formula> diagrams, noted <inline-formula><tex-math notation=\"LaTeX\">$\\mathcal {D}_{0}(f)$</tex-math></inline-formula> and <inline-formula><tex-math notation=\"LaTeX\">$\\mathcal {D}_{d-1}(f)$</tex-math></inline-formula>. Minima-saddle persistence pairs (<inline-formula><tex-math notation=\"LaTeX\">$\\mathcal {D}_{0}(f)$</tex-math></inline-formula>) and saddle-maximum persistence pairs (<inline-formula><tex-math notation=\"LaTeX\">$\\mathcal {D}_{d-1}(f)$</tex-math></inline-formula>) are efficiently computed by processing , with a Union-Find , the unstable sets of 1-saddles and the stable sets of <inline-formula><tex-math notation=\"LaTeX\">$(d-1)$</tex-math></inline-formula>-saddles. 
We provide a detailed description of the (optional) handling of the boundary component of <inline-formula><tex-math notation=\"LaTeX\">$\\mathcal {K}$</tex-math></inline-formula> when processing <inline-formula><tex-math notation=\"LaTeX\">$(d-1)$</tex-math></inline-formula>-saddles. This fast pre-computation for the dimensions 0 and <inline-formula><tex-math notation=\"LaTeX\">$(d-1)$</tex-math></inline-formula> enables an aggressive specialization of [4] to the 3D case, which results in a drastic reduction of the number of input simplices for the computation of <inline-formula><tex-math notation=\"LaTeX\">$\\mathcal {D}_{1}(f)$</tex-math></inline-formula>, the intermediate layer of the <italic>sandwich</italic>. Finally, we document several performance improvements via shared-memory parallelism. We provide an open-source implementation of our algorithm for reproducibility purposes. We also contribute a reproducible benchmark package, which exploits three-dimensional data from a public repository and compares our algorithm to a variety of publicly available implementations. Extensive experiments indicate that our algorithm improves by two orders of magnitude the time performance of the seminal <italic>“PairSimplices”</italic> algorithm it extends. Moreover, it also improves memory footprint and time performance over a selection of 14 competing approaches, with a substantial gain over the fastest available approaches, while producing a strictly identical output. We illustrate the utility of our contributions with an application to the fast and robust extraction of persistent 1-dimensional generators on surfaces, volume data and high-dimensional point clouds.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces an efficient algorithm for persistence diagram computation, given an input piecewise linear scalar field - defined on a --dimensional simplicial complex -, with -. Our work revisits the seminal algorithm “PairSimplices” [31], [103] with discrete Morse theory (DMT) [34], [80], which greatly reduces the number of input simplices to consider. Further, we also extend to DMT and accelerate the stratification strategy described in “PairSimplices” [31], [103] for the fast computation of the - and - diagrams, noted - and -. Minima-saddle persistence pairs (-) and saddle-maximum persistence pairs (-) are efficiently computed by processing , with a Union-Find , the unstable sets of 1-saddles and the stable sets of --saddles. We provide a detailed description of the (optional) handling of the boundary component of - when processing --saddles. This fast pre-computation for the dimensions 0 and - enables an aggressive specialization of [4] to the 3D case, which results in a drastic reduction of the number of input simplices for the computation of -, the intermediate layer of the sandwich. Finally, we document several performance improvements via shared-memory parallelism. We provide an open-source implementation of our algorithm for reproducibility purposes. We also contribute a reproducible benchmark package, which exploits three-dimensional data from a public repository and compares our algorithm to a variety of publicly available implementations. Extensive experiments indicate that our algorithm improves by two orders of magnitude the time performance of the seminal “PairSimplices” algorithm it extends. Moreover, it also improves memory footprint and time performance over a selection of 14 competing approaches, with a substantial gain over the fastest available approaches, while producing a strictly identical output. 
We illustrate the utility of our contributions with an application to the fast and robust extraction of persistent 1-dimensional generators on surfaces, volume data and high-dimensional point clouds.",
"title": "Discrete Morse Sandwich: Fast Computation of Persistence Diagrams for Scalar Data – An Algorithm and A Benchmark",
"normalizedTitle": "Discrete Morse Sandwich: Fast Computation of Persistence Diagrams for Scalar Data – An Algorithm and A Benchmark",
"fno": "10021892",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Mining",
"Feature Extraction",
"Data Visualization",
"Data Analysis",
"Three Dimensional Displays",
"Stars",
"Sea Surface",
"Topological Data Analysis",
"Scalar Data",
"Persistence Diagrams",
"Discrete Morse Theory"
],
"authors": [
{
"givenName": "Pierre",
"surname": "Guillou",
"fullName": "Pierre Guillou",
"affiliation": "CNRS and Sorbonne Université, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jules",
"surname": "Vidal",
"fullName": "Jules Vidal",
"affiliation": "CNRS and Sorbonne Université, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Julien",
"surname": "Tierny",
"fullName": "Julien Tierny",
"affiliation": "CNRS and Sorbonne Université, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tk/5555/01/09795326",
"title": "Least-Mean-Squares Coresets for Infinite Streams",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09795326/1Ecp81i5G00",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/5555/01/09842386",
"title": "Verifiable Homomorphic Secret Sharing for Low Degree Polynomials",
"doi": null,
"abstractUrl": "/journal/tq/5555/01/09842386/1FlM51CnLeo",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09893402",
"title": "Structured Sparse Non-negative Matrix Factorization with <inline-formula><tex-math notation=\"LaTeX\">$\\ell _{2,0}$</tex-math></inline-formula>-Norm",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09893402/1GGLdY0vH0c",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/cc/5555/01/09987649",
"title": "Anonymous Aggregate Fine-Grained Cloud Data Verification System for Smart Health",
"doi": null,
"abstractUrl": "/journal/cc/5555/01/09987649/1J7RK3HdSpO",
"parentPublication": {
"id": "trans/cc",
"title": "IEEE Transactions on Cloud Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/5555/01/10032622",
"title": "Tianji: Securing A Practical Asynchronous Multi-User ORAM",
"doi": null,
"abstractUrl": "/journal/tq/5555/01/10032622/1KnSwOy3LmE",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2023/04/10036482",
"title": "Test Data Compression for Transparent-Scan Sequences",
"doi": null,
"abstractUrl": "/journal/si/2023/04/10036482/1KxPXOB1Ogg",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/5555/01/10093117",
"title": "An Efficient Algorithm for Hamiltonian Path Embedding of <inline-formula><tex-math notation=\"LaTeX\">$k$</tex-math></inline-formula>-Ary <inline-formula><tex-math notation=\"LaTeX\">$n$</tex-math></inline-formula>-Cubes under the Partitioned Edge Fault Model",
"doi": null,
"abstractUrl": "/journal/td/5555/01/10093117/1M61XDsMpB6",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/01/09037115",
"title": "Aligning Points to Lines: Provable Approximations",
"doi": null,
"abstractUrl": "/journal/tk/2022/01/09037115/1igMO6tI3Is",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2023/01/09420260",
"title": "Age Optimal Information Gathering and Dissemination on Graphs",
"doi": null,
"abstractUrl": "/journal/tm/2023/01/09420260/1tdUGrSPdg4",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/nt/2022/01/09523604",
"title": "Universal Scaling of Distributed Queues Under Load Balancing in the Super-Halfin-Whitt Regime",
"doi": null,
"abstractUrl": "/journal/nt/2022/01/09523604/1wnL9MNq6Vq",
"parentPublication": {
"id": "trans/nt",
"title": "IEEE/ACM Transactions on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10021890",
"articleId": "1K3XC5MZdGE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10021888",
"articleId": "1K3XDZ8pUAg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1K9sskESoSY",
"name": "ttg555501-010021892s1-supp1-3238008.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010021892s1-supp1-3238008.pdf",
"extension": "pdf",
"size": "2.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1K0DFSXIg5W",
"doi": "10.1109/TVCG.2023.3237739",
"abstract": "If the video has long been mentioned as a widespread visualization form, the animation sequence in the video is mentioned as storytelling for people. Producing an animation requires intensive human labor from skilled professional artists to obtain plausible animation in both content and motion direction, incredibly for animations with complex content, multiple moving objects, and dense movement. This paper presents an interactive framework to generate new sequences according to the users' preference on the starting frame. The critical contrast of our approach versus prior work and existing commercial applications is that novel sequences with arbitrary starting frame are produced by our system with a consistent degree in both content and motion direction. To achieve this effectively, we first learn the feature correlation on the frameset of the given video through a proposed network called RSFNet. Then, we develop a novel path-finding algorithm, SDPF, which formulates the knowledge of motion directions of the source video to estimate the smooth and plausible sequences. The extensive experiments show that our framework can produce new animations on the cartoon and natural scenes and advance prior works and commercial applications to enable users to obtain more predictable results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "If the video has long been mentioned as a widespread visualization form, the animation sequence in the video is mentioned as storytelling for people. Producing an animation requires intensive human labor from skilled professional artists to obtain plausible animation in both content and motion direction, incredibly for animations with complex content, multiple moving objects, and dense movement. This paper presents an interactive framework to generate new sequences according to the users' preference on the starting frame. The critical contrast of our approach versus prior work and existing commercial applications is that novel sequences with arbitrary starting frame are produced by our system with a consistent degree in both content and motion direction. To achieve this effectively, we first learn the feature correlation on the frameset of the given video through a proposed network called RSFNet. Then, we develop a novel path-finding algorithm, SDPF, which formulates the knowledge of motion directions of the source video to estimate the smooth and plausible sequences. The extensive experiments show that our framework can produce new animations on the cartoon and natural scenes and advance prior works and commercial applications to enable users to obtain more predictable results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "If the video has long been mentioned as a widespread visualization form, the animation sequence in the video is mentioned as storytelling for people. Producing an animation requires intensive human labor from skilled professional artists to obtain plausible animation in both content and motion direction, incredibly for animations with complex content, multiple moving objects, and dense movement. This paper presents an interactive framework to generate new sequences according to the users' preference on the starting frame. The critical contrast of our approach versus prior work and existing commercial applications is that novel sequences with arbitrary starting frame are produced by our system with a consistent degree in both content and motion direction. To achieve this effectively, we first learn the feature correlation on the frameset of the given video through a proposed network called RSFNet. Then, we develop a novel path-finding algorithm, SDPF, which formulates the knowledge of motion directions of the source video to estimate the smooth and plausible sequences. The extensive experiments show that our framework can produce new animations on the cartoon and natural scenes and advance prior works and commercial applications to enable users to obtain more predictable results.",
"title": "Regenerating Arbitrary Video Sequences with Distillation Path-Finding",
"normalizedTitle": "Regenerating Arbitrary Video Sequences with Distillation Path-Finding",
"fno": "10018537",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Animation",
"Feature Extraction",
"Correlation",
"Shape",
"Video Sequences",
"Prediction Algorithms",
"Measurement",
"Animation",
"Sequencing",
"RSF Net",
"Distillation",
"SDPF"
],
"authors": [
{
"givenName": "Thi-Ngoc-Hanh",
"surname": "Le",
"fullName": "Thi-Ngoc-Hanh Le",
"affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sheng-Yi",
"surname": "Yao",
"fullName": "Sheng-Yi Yao",
"affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chun-Te",
"surname": "Wu",
"fullName": "Chun-Te Wu",
"affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tong-Yee",
"surname": "Lee",
"fullName": "Tong-Yee Lee",
"affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Taiwan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2009/4442/0/05457434",
"title": "Shape-Colour Histograms for matching 3D video sequences",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457434/12OmNqESudb",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206626",
"title": "Human motion synthesis from 3D video",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206626/12OmNwE9ONo",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284814",
"title": "An Efficient Markerless Method for Resynthesizing Facial Animation on an Anatomy-Based Model",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284814/12OmNx6PiFO",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2013/5051/0/5051a011",
"title": "Progressive Animation Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2013/5051a011/12OmNxR5UN3",
"parentPublication": {
"id": "proceedings/cgiv/2013/5051/0",
"title": "2013 10th International Conference Computer Graphics, Imaging and Visualization (CGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/1/01315108",
"title": "Synchronizing video sequences",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315108/12OmNyRg4so",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/1",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1993/06/i0569",
"title": "Analysis and Synthesis of Facial Image Sequences Using Physical and Anatomical Models",
"doi": null,
"abstractUrl": "/journal/tp/1993/06/i0569/13rRUwInvg4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/04/mcg2017040030",
"title": "Data-Driven Approach to Synthesizing Facial Animation Using Motion Capture",
"doi": null,
"abstractUrl": "/magazine/cg/2017/04/mcg2017040030/13rRUyeTVkv",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2000/04/v0360",
"title": "Perception-Based Fast Rendering and Antialiasing of Walkthrough Sequences",
"doi": null,
"abstractUrl": "/journal/tg/2000/04/v0360/13rRUyuegoV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/07/08960398",
"title": "Data-Driven 3D Neck Modeling and Animation",
"doi": null,
"abstractUrl": "/journal/tg/2021/07/08960398/1gC2pML2yuk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2020/8243/0/09425109",
"title": "Interactive Global Mosaic Stitching from Mesentery Video Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2020/09425109/1tuA1BrNwYM",
"parentPublication": {
"id": "proceedings/aipr/2020/8243/0",
"title": "2020 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10018173",
"articleId": "1JYZ6TXyjgk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10021890",
"articleId": "1K3XC5MZdGE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1K2ivlSmRm8",
"name": "ttg555501-010018537s1-supp1-3237739.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010018537s1-supp1-3237739.pdf",
"extension": "pdf",
"size": "4.99 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JYZ6TXyjgk",
"doi": "10.1109/TVCG.2023.3235538",
"abstract": "For 3D animators, choreography with artificial intelligence has attracted more attention recently. However, most existing deep learning methods mainly rely on music for dance generation and lack sufficient control over generated dance motions. To address this issue, we introduce the idea of keyframe interpolation for music-driven dance generation and present a novel transition generation technique for choreography. Specifically, this technique synthesizes visually diverse and plausible dance motions by using normalizing flows to learn the probability distribution of dance motions conditioned on a piece of music and a sparse set of key poses. Thus, the generated dance motions respect both the input musical beats and the key poses. To achieve a robust transition of varying lengths between the key poses, we introduce a time embedding at each timestep as an additional condition. Extensive experiments show that our model generates more realistic, diverse, and beat-matching dance motions than the compared state-of-the-art methods, both qualitatively and quantitatively. Our experimental results demonstrate the superiority of the keyframe-based control for improving the diversity of the generated dance motions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For 3D animators, choreography with artificial intelligence has attracted more attention recently. However, most existing deep learning methods mainly rely on music for dance generation and lack sufficient control over generated dance motions. To address this issue, we introduce the idea of keyframe interpolation for music-driven dance generation and present a novel transition generation technique for choreography. Specifically, this technique synthesizes visually diverse and plausible dance motions by using normalizing flows to learn the probability distribution of dance motions conditioned on a piece of music and a sparse set of key poses. Thus, the generated dance motions respect both the input musical beats and the key poses. To achieve a robust transition of varying lengths between the key poses, we introduce a time embedding at each timestep as an additional condition. Extensive experiments show that our model generates more realistic, diverse, and beat-matching dance motions than the compared state-of-the-art methods, both qualitatively and quantitatively. Our experimental results demonstrate the superiority of the keyframe-based control for improving the diversity of the generated dance motions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For 3D animators, choreography with artificial intelligence has attracted more attention recently. However, most existing deep learning methods mainly rely on music for dance generation and lack sufficient control over generated dance motions. To address this issue, we introduce the idea of keyframe interpolation for music-driven dance generation and present a novel transition generation technique for choreography. Specifically, this technique synthesizes visually diverse and plausible dance motions by using normalizing flows to learn the probability distribution of dance motions conditioned on a piece of music and a sparse set of key poses. Thus, the generated dance motions respect both the input musical beats and the key poses. To achieve a robust transition of varying lengths between the key poses, we introduce a time embedding at each timestep as an additional condition. Extensive experiments show that our model generates more realistic, diverse, and beat-matching dance motions than the compared state-of-the-art methods, both qualitatively and quantitatively. Our experimental results demonstrate the superiority of the keyframe-based control for improving the diversity of the generated dance motions.",
"title": "Keyframe Control of Music-driven 3D Dance Generation",
"normalizedTitle": "Keyframe Control of Music-driven 3D Dance Generation",
"fno": "10018173",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Humanities",
"Animation",
"Three Dimensional Displays",
"Deep Learning",
"Probabilistic Logic",
"Interpolation",
"Task Analysis",
"3 D Animation",
"Generative Flows",
"Multi Modal",
"Music Driven",
"Choreography"
],
"authors": [
{
"givenName": "Zhipeng",
"surname": "Yang",
"fullName": "Zhipeng Yang",
"affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yu-Hui",
"surname": "Wen",
"fullName": "Yu-Hui Wen",
"affiliation": "CS Dept, BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shu-Yu",
"surname": "Chen",
"fullName": "Shu-Yu Chen",
"affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiao",
"surname": "Liu",
"fullName": "Xiao Liu",
"affiliation": "Tomorrow Advancing Life Education Group, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuan",
"surname": "Gao",
"fullName": "Yuan Gao",
"affiliation": "Tomorrow Advancing Life Education Group, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong-Jin",
"surname": "Liu",
"fullName": "Yong-Jin Liu",
"affiliation": "CS Dept, BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lin",
"surname": "Gao",
"fullName": "Lin Gao",
"affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongbo",
"surname": "Fu",
"fullName": "Hongbo Fu",
"affiliation": "School of Creative Media, City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2014/4677/0/4677a253",
"title": "Sketch-Based Dance Choreography",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2014/4677a253/12OmNzaQoBR",
"parentPublication": {
"id": "proceedings/cw/2014/4677/0",
"title": "2014 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/03/ttg2012030501",
"title": "Example-Based Automatic Music-Driven Conventional Dance Motion Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2012/03/ttg2012030501/13rRUwwaKt6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3381",
"title": "AI Choreographer: Music Conditioned 3D Dance Generation with AIST++",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3381/1BmJ1TiWSB2",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09745335",
"title": "Rhythm is a Dancer: Music-Driven Motion Synthesis with Global Structure",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09745335/1CagHUR61pe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2022/6908/0/690800a104",
"title": "Prototype System of Dance Movement Creation by VR Experience of Augmented Human Body",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2022/690800a104/1FWn0mibOE0",
"parentPublication": {
"id": "proceedings/nicoint/2022/6908/0",
"title": "2022 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1040",
"title": "Bailando: 3D Dance Generation by Actor-Critic GPT with Choreographic Memory",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1040/1H1kphKCKPu",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600d480",
"title": "A Brand New Dance Partner: Music-Conditioned Pluralistic Dancing Controlled by Multiple Dance Genres",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600d480/1H1lISb1OjS",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedcs/2022/5541/0/554100a351",
"title": "Electronic Dance Music Classification Based on Machine Learning Methods",
"doi": null,
"abstractUrl": "/proceedings-article/icedcs/2022/554100a351/1JC1pICXsEo",
"parentPublication": {
"id": "proceedings/icedcs/2022/5541/0",
"title": "2022 International Conference on Electronics and Devices, Computational Science (ICEDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isaiam/2021/3260/0/326000a055",
"title": "AutoDance: Music Driven Dance Generation",
"doi": null,
"abstractUrl": "/proceedings-article/isaiam/2021/326000a055/1wiQVBNgFhe",
"parentPublication": {
"id": "proceedings/isaiam/2021/3260/0",
"title": "2021 International Symposium on Artificial Intelligence and its Application on Media (ISAIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a348",
"title": "Dance to Music: Generative Choreography with Music using Mixture Density Networks",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a348/1xPslGYA8Gk",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10015807",
"articleId": "1JSl47Z1P7q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10018537",
"articleId": "1K0DFSXIg5W",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1K0DFltTHS8",
"name": "ttg555501-010018173s1-supp1-3235538.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010018173s1-supp1-3235538.mp4",
"extension": "mp4",
"size": "78.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JR6dVW7wJi",
"doi": "10.1109/TVCG.2023.3236061",
"abstract": "How will you repair a physical object with some missings? You may imagine its original shape from previously captured images, recover its overall (global) but coarse shape first, and then refine its local details. We are motivated to imitate the physical repair procedure to address point cloud completion. To this end, we propose a cross-modal shape-transfer dual-refinement network (termed CSDN), a coarse-to-fine paradigm with images of full-cycle participation, for quality point cloud completion. CSDN mainly consists of “shape fusion” and “dual-refinement” modules to tackle the cross-modal challenge. The first module transfers the intrinsic shape characteristics from single images to guide the geometry generation of the missing regions of point clouds, in which we propose IPAdaIN to embed the global features of both the image and the partial point cloud into completion. The second module refines the coarse output by adjusting the positions of the generated points, where the local refinement unit exploits the geometric relation between the novel and the input points by graph convolution, and the global constraint unit utilizes the input image to fine-tune the generated offset. Different from most existing approaches, CSDN not only explores the complementary information from images but also effectively exploits cross-modal data in the <italic>whole</italic> coarse-to-fine completion procedure. Experimental results indicate that CSDN performs favorably against twelve competitors on the cross-modal benchmark.",
"abstracts": [
{
"abstractType": "Regular",
"content": "How will you repair a physical object with some missings? You may imagine its original shape from previously captured images, recover its overall (global) but coarse shape first, and then refine its local details. We are motivated to imitate the physical repair procedure to address point cloud completion. To this end, we propose a cross-modal shape-transfer dual-refinement network (termed CSDN), a coarse-to-fine paradigm with images of full-cycle participation, for quality point cloud completion. CSDN mainly consists of “shape fusion” and “dual-refinement” modules to tackle the cross-modal challenge. The first module transfers the intrinsic shape characteristics from single images to guide the geometry generation of the missing regions of point clouds, in which we propose IPAdaIN to embed the global features of both the image and the partial point cloud into completion. The second module refines the coarse output by adjusting the positions of the generated points, where the local refinement unit exploits the geometric relation between the novel and the input points by graph convolution, and the global constraint unit utilizes the input image to fine-tune the generated offset. Different from most existing approaches, CSDN not only explores the complementary information from images but also effectively exploits cross-modal data in the <italic>whole</italic> coarse-to-fine completion procedure. Experimental results indicate that CSDN performs favorably against twelve competitors on the cross-modal benchmark.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "How will you repair a physical object with some missings? You may imagine its original shape from previously captured images, recover its overall (global) but coarse shape first, and then refine its local details. We are motivated to imitate the physical repair procedure to address point cloud completion. To this end, we propose a cross-modal shape-transfer dual-refinement network (termed CSDN), a coarse-to-fine paradigm with images of full-cycle participation, for quality point cloud completion. CSDN mainly consists of “shape fusion” and “dual-refinement” modules to tackle the cross-modal challenge. The first module transfers the intrinsic shape characteristics from single images to guide the geometry generation of the missing regions of point clouds, in which we propose IPAdaIN to embed the global features of both the image and the partial point cloud into completion. The second module refines the coarse output by adjusting the positions of the generated points, where the local refinement unit exploits the geometric relation between the novel and the input points by graph convolution, and the global constraint unit utilizes the input image to fine-tune the generated offset. Different from most existing approaches, CSDN not only explores the complementary information from images but also effectively exploits cross-modal data in the whole coarse-to-fine completion procedure. Experimental results indicate that CSDN performs favorably against twelve competitors on the cross-modal benchmark.",
"title": "CSDN: Cross-Modal Shape-Transfer Dual-Refinement Network for Point Cloud Completion",
"normalizedTitle": "CSDN: Cross-Modal Shape-Transfer Dual-Refinement Network for Point Cloud Completion",
"fno": "10015045",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Shape",
"Point Cloud Compression",
"Three Dimensional Displays",
"Maintenance Engineering",
"Geometry",
"Transformers",
"Fuses",
"CSDN",
"Cross Modality",
"Multi Feature Fusion",
"Point Cloud Completion"
],
"authors": [
{
"givenName": "Zhe",
"surname": "Zhu",
"fullName": "Zhe Zhu",
"affiliation": "School of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Liangliang",
"surname": "Nan",
"fullName": "Liangliang Nan",
"affiliation": "Urban Data Science Section, Delft University of Technology, Delft, Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haoran",
"surname": "Xie",
"fullName": "Haoran Xie",
"affiliation": "Department of Computing and Decision Sciences, Lingnan University, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Honghua",
"surname": "Chen",
"fullName": "Honghua Chen",
"affiliation": "School of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jun",
"surname": "Wang",
"fullName": "Jun Wang",
"affiliation": "School of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mingqiang",
"surname": "Wei",
"fullName": "Mingqiang Wei",
"affiliation": "School of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jing",
"surname": "Qin",
"fullName": "Jing Qin",
"affiliation": "School of Nursing, The Hong Kong, Hong Kong, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tp/2023/01/09735342",
"title": "PMP-Net++: Point Cloud Completion by Transformer-Enhanced Multi-Step Point Moving Paths",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09735342/1BLmVZBJX6o",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200m2468",
"title": "ME-PCN: Point Completion Conditioned on Mask Emptiness",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200m2468/1BmEuGPb47C",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200m2488",
"title": "RFNet: Recurrent Forward Network for Dense Point Cloud Completion",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200m2488/1BmGZBNPhja",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f806",
"title": "3D Shape Generation and Completion through Point-Voxel Diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f806/1BmHiEgI4q4",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09804851",
"title": "Point Cloud Completion Via Skeleton-Detail Transformer",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09804851/1ErlpBk8JBS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859668",
"title": "HFF-Net: Hierarchical Feature Fusion Network for Point Cloud Generation with Point Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859668/1G9DKBzb6I8",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f533",
"title": "Learning a Structured Latent Space for Unsupervised Point Cloud Completion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f533/1H0KOsU2FZC",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600i553",
"title": "X -Trans2Cap: Cross-Modal Knowledge Transfer using Transformer for 3D Dense Captioning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600i553/1H0NZQRh3sA",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2022/9744/0/974400a976",
"title": "A Cross-Modal Object-Aware Transformer for Vision-and-Language Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2022/974400a976/1MrFWswSxzO",
"parentPublication": {
"id": "proceedings/ictai/2022/9744/0",
"title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b269",
"title": "GASCN: Graph Attention Shape Completion Network",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b269/1zWEc53kN9u",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10012505",
"articleId": "1JNmJJBoNkQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10015807",
"articleId": "1JSl47Z1P7q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JSl47Z1P7q",
"doi": "10.1109/TVCG.2023.3236380",
"abstract": "Benchmark datasets play an important role in evaluating Natural Language Understanding (NLU) models. However, shortcuts—unwanted biases in the benchmark datasets—can damage the effectiveness of benchmark datasets in revealing models' real capabilities. Since shortcuts vary in coverage, productivity, and semantic meaning, it is challenging for NLU experts to systematically understand and avoid them when creating benchmark datasets. In this paper, we develop a visual analytics system, <italic>ShortcutLens</italic>, to help NLU experts explore shortcuts in NLU benchmark datasets. The system allows users to conduct multi-level exploration of shortcuts. Specifically, Statistics View helps users grasp the statistics such as coverage and productivity of shortcuts in the benchmark dataset. Template View employs hierarchical and interpretable templates to summarize different types of shortcuts. Instance View allows users to check the corresponding instances covered by the shortcuts. We conduct case studies and expert interviews to evaluate the effectiveness and usability of the system. The results demonstrate that <italic>ShortcutLens</italic> supports users in gaining a better understanding of benchmark dataset issues through shortcuts, inspiring them to create challenging and pertinent benchmark datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Benchmark datasets play an important role in evaluating Natural Language Understanding (NLU) models. However, shortcuts—unwanted biases in the benchmark datasets—can damage the effectiveness of benchmark datasets in revealing models' real capabilities. Since shortcuts vary in coverage, productivity, and semantic meaning, it is challenging for NLU experts to systematically understand and avoid them when creating benchmark datasets. In this paper, we develop a visual analytics system, <italic>ShortcutLens</italic>, to help NLU experts explore shortcuts in NLU benchmark datasets. The system allows users to conduct multi-level exploration of shortcuts. Specifically, Statistics View helps users grasp the statistics such as coverage and productivity of shortcuts in the benchmark dataset. Template View employs hierarchical and interpretable templates to summarize different types of shortcuts. Instance View allows users to check the corresponding instances covered by the shortcuts. We conduct case studies and expert interviews to evaluate the effectiveness and usability of the system. The results demonstrate that <italic>ShortcutLens</italic> supports users in gaining a better understanding of benchmark dataset issues through shortcuts, inspiring them to create challenging and pertinent benchmark datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Benchmark datasets play an important role in evaluating Natural Language Understanding (NLU) models. However, shortcuts—unwanted biases in the benchmark datasets—can damage the effectiveness of benchmark datasets in revealing models' real capabilities. Since shortcuts vary in coverage, productivity, and semantic meaning, it is challenging for NLU experts to systematically understand and avoid them when creating benchmark datasets. In this paper, we develop a visual analytics system, ShortcutLens, to help NLU experts explore shortcuts in NLU benchmark datasets. The system allows users to conduct multi-level exploration of shortcuts. Specifically, Statistics View helps users grasp the statistics such as coverage and productivity of shortcuts in the benchmark dataset. Template View employs hierarchical and interpretable templates to summarize different types of shortcuts. Instance View allows users to check the corresponding instances covered by the shortcuts. We conduct case studies and expert interviews to evaluate the effectiveness and usability of the system. The results demonstrate that ShortcutLens supports users in gaining a better understanding of benchmark dataset issues through shortcuts, inspiring them to create challenging and pertinent benchmark datasets.",
"title": "ShortcutLens: A Visual Analytics Approach for Exploring Shortcuts in Natural Language Understanding Dataset",
"normalizedTitle": "ShortcutLens: A Visual Analytics Approach for Exploring Shortcuts in Natural Language Understanding Dataset",
"fno": "10015807",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Benchmark Testing",
"Task Analysis",
"Natural Language Processing",
"Cognition",
"Guidelines",
"Predictive Models",
"Computational Modeling",
"Visual Analytics",
"Natural Language Understanding",
"Shortcut"
],
"authors": [
{
"givenName": "Zhihua",
"surname": "Jin",
"fullName": "Zhihua Jin",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xingbo",
"surname": "Wang",
"fullName": "Xingbo Wang",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Furui",
"surname": "Cheng",
"fullName": "Furui Cheng",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chunhui",
"surname": "Sun",
"fullName": "Chunhui Sun",
"affiliation": "Peking University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qun",
"surname": "Liu",
"fullName": "Qun Liu",
"affiliation": "Huawei Noah's Ark Lab, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851d213",
"title": "The Cityscapes Dataset for Semantic Urban Scene Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d213/12OmNAtaS0N",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2015/0163/0/0163a218",
"title": "A Synthesis of Stochastic Petri Net (SPN) Graphs for Natural Language Understanding (NLU) Event/Action Association",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2015/0163a218/12OmNwqx4a5",
"parentPublication": {
"id": "proceedings/ictai/2015/0163/0",
"title": "2015 IEEE 27th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tai/1993/4200/0/00633965",
"title": "CARAMEL: A step towards reflection in natural language understanding systems",
"doi": null,
"abstractUrl": "/proceedings-article/tai/1993/00633965/12OmNzlUKBS",
"parentPublication": {
"id": "proceedings/tai/1993/4200/0",
"title": "Proceedings of 1993 IEEE Conference on Tools with Al (TAI-93)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1995/05/i0449",
"title": "The Application of Semantic Classification Trees to Natural Language Understanding",
"doi": null,
"abstractUrl": "/journal/tp/1995/05/i0449/13rRUxBa5sH",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671571",
"title": "Language-Agnostic and Language-Aware Multilingual Natural Language Understanding for Large-Scale Intelligent Voice Assistant Application",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671571/1A8gBljOiTC",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200b224",
"title": "e-ViL: A Dataset and Benchmark for Natural Language Explanations in Vision-Language Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200b224/1BmFPQSK4pO",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cecit/2021/3757/0/375700a111",
"title": "A Parameter-Adaptive Convolution Neural Network for Capturing the Context-Specific Information in Natural Language Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/cecit/2021/375700a111/1CdEX25h5aU",
"parentPublication": {
"id": "proceedings/cecit/2021/3757/0",
"title": "2021 2nd International Conference on Electronics, Communications and Information Technology (CECIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/03/09796582",
"title": "CASIA-E: A Large Comprehensive Dataset for Gait Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2023/03/09796582/1EexjnGsWgo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/re/2022/7000/0/700000a064",
"title": "CADE: The Missing Benchmark in Evaluating Dataset Requirements of AI-enabled Software",
"doi": null,
"abstractUrl": "/proceedings-article/re/2022/700000a064/1HBKsMoFemY",
"parentPublication": {
"id": "proceedings/re/2022/7000/0",
"title": "2022 IEEE 30th International Requirements Engineering Conference (RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2019/5584/0/558400a420",
"title": "Recent Trends in Natural Language Understanding for Procedural Knowledge",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2019/558400a420/1jdDTPSAcb6",
"parentPublication": {
"id": "proceedings/csci/2019/5584/0",
"title": "2019 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10015045",
"articleId": "1JR6dVW7wJi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10018173",
"articleId": "1JYZ6TXyjgk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JNmJ1M4N9e",
"doi": "10.1109/TVCG.2023.3235277",
"abstract": "Obtaining accurate SVBRDFs from 2D photographs of shiny, heterogeneous 3D objects is a highly sought-after goal for domains like cultural heritage archiving, where it is critical to document color appearance in high fidelity. In prior work such as the promising framework by Nam et al. [1], the problem is simplified by assuming that specular highlights exhibit symmetry and isotropy about an estimated surface normal. The present work builds on this foundation with several significant modifications. Recognizing the importance of the surface normal as an axis of symmetry, we compare nonlinear optimization for normals with a linear approximation proposed by Nam et al. and find that nonlinear optimization is superior to the linear approximation, while noting that the surface normal estimates generally have a very significant impact on the reconstructed color appearance of the object. We also examine the use of a monotonicity constraint for reflectance and develop a generalization that also enforces continuity and smoothness when optimizing continuous monotonic functions like a microfacet distribution. Finally, we explore the impact of simplifying from an arbitrary 1D basis function to a traditional parametric microfacet distribution (GGX), and we find this to be a reasonable approximation that trades some fidelity for practicality in certain applications. Both representations can be used in existing rendering architectures like game engines or online 3D viewers, while retaining accurate color appearance for fidelity-critical applications like cultural heritage or online sales.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Obtaining accurate SVBRDFs from 2D photographs of shiny, heterogeneous 3D objects is a highly sought-after goal for domains like cultural heritage archiving, where it is critical to document color appearance in high fidelity. In prior work such as the promising framework by Nam et al. [1], the problem is simplified by assuming that specular highlights exhibit symmetry and isotropy about an estimated surface normal. The present work builds on this foundation with several significant modifications. Recognizing the importance of the surface normal as an axis of symmetry, we compare nonlinear optimization for normals with a linear approximation proposed by Nam et al. and find that nonlinear optimization is superior to the linear approximation, while noting that the surface normal estimates generally have a very significant impact on the reconstructed color appearance of the object. We also examine the use of a monotonicity constraint for reflectance and develop a generalization that also enforces continuity and smoothness when optimizing continuous monotonic functions like a microfacet distribution. Finally, we explore the impact of simplifying from an arbitrary 1D basis function to a traditional parametric microfacet distribution (GGX), and we find this to be a reasonable approximation that trades some fidelity for practicality in certain applications. Both representations can be used in existing rendering architectures like game engines or online 3D viewers, while retaining accurate color appearance for fidelity-critical applications like cultural heritage or online sales.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Obtaining accurate SVBRDFs from 2D photographs of shiny, heterogeneous 3D objects is a highly sought-after goal for domains like cultural heritage archiving, where it is critical to document color appearance in high fidelity. In prior work such as the promising framework by Nam et al. [1], the problem is simplified by assuming that specular highlights exhibit symmetry and isotropy about an estimated surface normal. The present work builds on this foundation with several significant modifications. Recognizing the importance of the surface normal as an axis of symmetry, we compare nonlinear optimization for normals with a linear approximation proposed by Nam et al. and find that nonlinear optimization is superior to the linear approximation, while noting that the surface normal estimates generally have a very significant impact on the reconstructed color appearance of the object. We also examine the use of a monotonicity constraint for reflectance and develop a generalization that also enforces continuity and smoothness when optimizing continuous monotonic functions like a microfacet distribution. Finally, we explore the impact of simplifying from an arbitrary 1D basis function to a traditional parametric microfacet distribution (GGX), and we find this to be a reasonable approximation that trades some fidelity for practicality in certain applications. Both representations can be used in existing rendering architectures like game engines or online 3D viewers, while retaining accurate color appearance for fidelity-critical applications like cultural heritage or online sales.",
"title": "High-Fidelity Specular SVBRDF Acquisition from Flash Photographs",
"normalizedTitle": "High-Fidelity Specular SVBRDF Acquisition from Flash Photographs",
"fno": "10012127",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Reflectivity",
"Rendering Computer Graphics",
"Three Dimensional Displays",
"Optimization",
"Geometry",
"Image Color Analysis",
"Lighting",
"SVBRDF Acquisition",
"Normal Map Refinement",
"Non Linear Optimization",
"Computational Photography",
"Flash Photography",
"Photogrammetry",
"Image Based Relighting",
"Real Time Rendering"
],
"authors": [
{
"givenName": "Michael",
"surname": "Tetzlaff",
"fullName": "Michael Tetzlaff",
"affiliation": "Department of Mathematics, Statistics, and Computer Science, University of Wisconsin – Stout, Menomonie, WI, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200m2799",
"title": "Towards High Fidelity Monocular Face Reconstruction with Rich Reflectance using Self-supervised Learning and Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200m2799/1BmJb3RcOGY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2022/5851/0/09887646",
"title": "Differentiable Appearance Acquisition from a Flash/No-flash RGB-D Pair",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2022/09887646/1GZixnSNfiM",
"parentPublication": {
"id": "proceedings/iccp/2022/5851/0",
"title": "2022 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5851",
"title": "DoubleField: Bridging the Neural Surface and Radiance Fields for High-fidelity Human Reconstruction and Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5851/1H1ngsFUdwY",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2023/4544/0/10042713",
"title": "S2F2: Self-Supervised High Fidelity Face Reconstruction from Monocular Image",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2023/10042713/1KOuX36iVRm",
"parentPublication": {
"id": "proceedings/fg/2023/4544/0",
"title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300i728",
"title": "Deep Appearance Maps",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300i728/1hQqwkq2afC",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f416",
"title": "Neural Voxel Renderer: Learning an Accurate and Controllable Rendering Tool",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f416/1m3nYbnokEM",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800b186",
"title": "Shape from Tracing: Towards Reconstructing 3D Object Geometry and SVBRDF Material from Images via Differentiable Path Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800b186/1qyxkY66O08",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2021/1952/0/09466274",
"title": "View-dependent Scene Appearance Synthesis using Inverse Rendering from Light Fields",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2021/09466274/1uSSV7tRhSw",
"parentPublication": {
"id": "proceedings/iccp/2021/1952/0",
"title": "2021 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f449",
"title": "PhySG: Inverse Rendering with Spherical Gaussians for Physics-based Material Editing and Relighting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f449/1yeIKNwhdsI",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900p5562",
"title": "Deep Polarization Imaging for 3D Shape and SVBRDF Acquisition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900p5562/1yeLTHWN14c",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10008084",
"articleId": "1JIoM5ABwoU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10012505",
"articleId": "1JNmJJBoNkQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JP1ymmGDW8",
"name": "ttg555501-010012127s1-supp1-3235277.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010012127s1-supp1-3235277.pdf",
"extension": "pdf",
"size": "165 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JNmJJBoNkQ",
"doi": "10.1109/TVCG.2023.3235364",
"abstract": "The development of deep generative models has inspired various facial image editing methods, but many of them are difficult to be directly applied to video editing due to various challenges ranging from imposing 3D constraints, preserving identity consistency, ensuring temporal coherence, etc. To address these challenges, we propose a new framework operating on the StyleGAN2 latent space for identity-aware and shape-aware edit propagation on face videos. In order to reduce the difficulties of maintaining the identity, keeping the original 3D motion, and avoiding shape distortions, we disentangle the StyleGAN2 latent vectors of human face video frames to decouple the appearance, shape, expression, and motion from identity. An edit encoding module is used to map a sequence of image frames to continuous latent codes with 3D parametric control and is trained in a self-supervised manner with identity loss and triple shape losses. Our model supports propagation of edits in various forms: I. direct appearance editing on a specific keyframe, II. implicit editing of face shape via a given reference image, and III. existing latent-based semantic edits. Experiments show that our method works well for various forms of videos in the wild and outperforms an animation-based approach and the recent deep generative techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The development of deep generative models has inspired various facial image editing methods, but many of them are difficult to be directly applied to video editing due to various challenges ranging from imposing 3D constraints, preserving identity consistency, ensuring temporal coherence, etc. To address these challenges, we propose a new framework operating on the StyleGAN2 latent space for identity-aware and shape-aware edit propagation on face videos. In order to reduce the difficulties of maintaining the identity, keeping the original 3D motion, and avoiding shape distortions, we disentangle the StyleGAN2 latent vectors of human face video frames to decouple the appearance, shape, expression, and motion from identity. An edit encoding module is used to map a sequence of image frames to continuous latent codes with 3D parametric control and is trained in a self-supervised manner with identity loss and triple shape losses. Our model supports propagation of edits in various forms: I. direct appearance editing on a specific keyframe, II. implicit editing of face shape via a given reference image, and III. existing latent-based semantic edits. Experiments show that our method works well for various forms of videos in the wild and outperforms an animation-based approach and the recent deep generative techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The development of deep generative models has inspired various facial image editing methods, but many of them are difficult to be directly applied to video editing due to various challenges ranging from imposing 3D constraints, preserving identity consistency, ensuring temporal coherence, etc. To address these challenges, we propose a new framework operating on the StyleGAN2 latent space for identity-aware and shape-aware edit propagation on face videos. In order to reduce the difficulties of maintaining the identity, keeping the original 3D motion, and avoiding shape distortions, we disentangle the StyleGAN2 latent vectors of human face video frames to decouple the appearance, shape, expression, and motion from identity. An edit encoding module is used to map a sequence of image frames to continuous latent codes with 3D parametric control and is trained in a self-supervised manner with identity loss and triple shape losses. Our model supports propagation of edits in various forms: I. direct appearance editing on a specific keyframe, II. implicit editing of face shape via a given reference image, and III. existing latent-based semantic edits. Experiments show that our method works well for various forms of videos in the wild and outperforms an animation-based approach and the recent deep generative techniques.",
"title": "Identity-Aware and Shape-Aware Propagation of Face Editing in Videos",
"normalizedTitle": "Identity-Aware and Shape-Aware Propagation of Face Editing in Videos",
"fno": "10012505",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Videos",
"Shape",
"Faces",
"Three Dimensional Displays",
"Codes",
"Aerospace Electronics",
"Semantics",
"Editing Propagation",
"Face Editing",
"Video Editing"
],
"authors": [
{
"givenName": "Yue-Ren",
"surname": "Jiang",
"fullName": "Yue-Ren Jiang",
"affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shu-Yu",
"surname": "Chen",
"fullName": "Shu-Yu Chen",
"affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongbo",
"surname": "Fu",
"fullName": "Hongbo Fu",
"affiliation": "School of Creative Media, City University of Hong Kong, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lin",
"surname": "Gao",
"fullName": "Lin Gao",
"affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2021/3176/0/09667059",
"title": "Emotion Editing in Head Reenactment Videos using Latent Space Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09667059/1A6BJzpRJcs",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3769",
"title": "A Latent Transformer for Disentangled Face Editing in Images and Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3769/1BmHwMsaCD6",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4408",
"title": "Latent Transformations via NeuralODEs for GAN-based Image Editing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4408/1BmKdSIin3W",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859605",
"title": "Spatial Attention Guided Local Facial Attribute Editing",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859605/1G9DufVBCk8",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600h673",
"title": "TransEditor: Transformer-Based Dual-Space GAN for Highly Controllable Facial Editing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600h673/1H1mrnr7FoQ",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/04/09241434",
"title": "InterFaceGAN: Interpreting the Disentangled Face Representation Learned by GANs",
"doi": null,
"abstractUrl": "/journal/tp/2022/04/09241434/1ogEwfwfCjC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c950",
"title": "L2M-GAN: Learning to Manipulate Latent Space Semantics for Facial Attribute Editing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c950/1yeKxZd2yti",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d670",
"title": "Navigating the GAN Parameter Space for Semantic Image Editing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d670/1yeM1iXyYXC",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09668999",
"title": "Cross-Domain and Disentangled Face Manipulation With 3D Guidance",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09668999/1zTfZzq1wqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2021/4254/0/425400a520",
"title": "Improved Semantic-aware StyleGAN-based Real Face Editing Model",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2021/425400a520/1ziPdCmrGIE",
"parentPublication": {
"id": "proceedings/iccst/2021/4254/0",
"title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10012127",
"articleId": "1JNmJ1M4N9e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10015045",
"articleId": "1JR6dVW7wJi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1K3XFhUqOsw",
"name": "ttg555501-010012505s1-tvcg-3235364-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010012505s1-tvcg-3235364-mm.zip",
"extension": "zip",
"size": "39.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JH4iZQdoEU",
"doi": "10.1109/TVCG.2022.3231967",
"abstract": "Despite growing interest in probabilistic modeling approaches and availability of learning tools, people are hesitant to use them. There is a need for tools to communicate probabilistic models more intuitively and help users build, validate, use effectively or trust probabilistic models. We focus on visual representations of probabilistic models and introduce the Interactive Pair Plot (IPP) for visualization of a model's uncertainty, a scatter plot matrix of a probabilistic model allowing interactive conditioning on the model's variables. We investigate whether the use of interactive conditioning in a scatter plot matrix of a model helps users better understand variables' relations. We conducted a user study and the findings suggest that improvements in the understanding of the interaction group are the most pronounced for more exotic structures, such as hierarchical models or unfamiliar parameterizations, in comparison to the understanding of the static group. As the detail of the inferred information increases, interactive conditioning does not lead to considerably longer response times. Finally, interactive conditioning improves participants' confidence about their responses.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Despite growing interest in probabilistic modeling approaches and availability of learning tools, people are hesitant to use them. There is a need for tools to communicate probabilistic models more intuitively and help users build, validate, use effectively or trust probabilistic models. We focus on visual representations of probabilistic models and introduce the Interactive Pair Plot (IPP) for visualization of a model's uncertainty, a scatter plot matrix of a probabilistic model allowing interactive conditioning on the model's variables. We investigate whether the use of interactive conditioning in a scatter plot matrix of a model helps users better understand variables' relations. We conducted a user study and the findings suggest that improvements in the understanding of the interaction group are the most pronounced for more exotic structures, such as hierarchical models or unfamiliar parameterizations, in comparison to the understanding of the static group. As the detail of the inferred information increases, interactive conditioning does not lead to considerably longer response times. Finally, interactive conditioning improves participants' confidence about their responses.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Despite growing interest in probabilistic modeling approaches and availability of learning tools, people are hesitant to use them. There is a need for tools to communicate probabilistic models more intuitively and help users build, validate, use effectively or trust probabilistic models. We focus on visual representations of probabilistic models and introduce the Interactive Pair Plot (IPP) for visualization of a model's uncertainty, a scatter plot matrix of a probabilistic model allowing interactive conditioning on the model's variables. We investigate whether the use of interactive conditioning in a scatter plot matrix of a model helps users better understand variables' relations. We conducted a user study and the findings suggest that improvements in the understanding of the interaction group are the most pronounced for more exotic structures, such as hierarchical models or unfamiliar parameterizations, in comparison to the understanding of the static group. As the detail of the inferred information increases, interactive conditioning does not lead to considerably longer response times. Finally, interactive conditioning improves participants' confidence about their responses.",
"title": "Does Interactive Conditioning Help Users Better Understand the Structure of Probabilistic Models?",
"normalizedTitle": "Does Interactive Conditioning Help Users Better Understand the Structure of Probabilistic Models?",
"fno": "10007070",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Probabilistic Logic",
"Mathematical Models",
"Temperature Distribution",
"Visualization",
"Computational Modeling",
"Shape",
"Bayes Methods",
"Brushing And Linking",
"Empirical Study",
"Interactive Conditioning",
"Prior Distribution",
"Probabilistic Models",
"Scatter Plot Matrix"
],
"authors": [
{
"givenName": "Evdoxia",
"surname": "Taka",
"fullName": "Evdoxia Taka",
"affiliation": "School of Computing Science, University of Glasgow, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sebastian",
"surname": "Stein",
"fullName": "Sebastian Stein",
"affiliation": "School of Computing Science, University of Glasgow, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "John H.",
"surname": "Williamson",
"fullName": "John H. Williamson",
"affiliation": "School of Computing Science, University of Glasgow, U.K.",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ecrts/2014/5798/0/5798a255",
"title": "Heart of Gold: Making the Improbable Happen to Increase Confidence in MBPTA",
"doi": null,
"abstractUrl": "/proceedings-article/ecrts/2014/5798a255/12OmNBSBkbE",
"parentPublication": {
"id": "proceedings/ecrts/2014/5798/0",
"title": "2014 26th Euromicro Conference on Real-Time Systems (ECRTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itcs/2009/3688/2/3688b518",
"title": "Self-Balance Control of Inverted Pendulum Based on Fuzzy Skinner Operant Conditioning",
"doi": null,
"abstractUrl": "/proceedings-article/itcs/2009/3688b518/12OmNBkP3wX",
"parentPublication": {
"id": "proceedings/itcs/2009/3688/2",
"title": "Information Technology and Computer Science, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2015/8020/0/07450402",
"title": "CAD Parts-Based Assembly Modeling by Probabilistic Reasoning",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450402/12OmNwBjP5s",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2014/5666/0/07004213",
"title": "PGMHD: A scalable probabilistic graphical model for massive hierarchical data problems",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2014/07004213/12OmNx7ov4h",
"parentPublication": {
"id": "proceedings/big-data/2014/5666/0",
"title": "2014 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issrew/2016/3601/0/3601a181",
"title": "Bayesian Network Based Program Dependence Graph for Fault Localization",
"doi": null,
"abstractUrl": "/proceedings-article/issrew/2016/3601a181/12OmNzX6cqS",
"parentPublication": {
"id": "proceedings/issrew/2016/3601/0",
"title": "2016 IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1993/03/i0280",
"title": "Probability Intervals Over Influence Diagrams",
"doi": null,
"abstractUrl": "/journal/tp/1993/03/i0280/13rRUxYIMWa",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2018/7325/0/732500a064",
"title": "Bayesian Knowledge Base Distance-Based Tuning",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2018/732500a064/17D45WWzW5e",
"parentPublication": {
"id": "proceedings/wi/2018/7325/0",
"title": "2018 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ivmem/2019/4623/0/462300a101",
"title": "Application of Anomaly Detection Methods in the Housing and Utility Infrastructure Data",
"doi": null,
"abstractUrl": "/proceedings-article/ivmem/2019/462300a101/1eof3J4vtzG",
"parentPublication": {
"id": "proceedings/ivmem/2019/4623/0",
"title": "2019 Ivannikov Memorial Workshop (IVMEM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ai/2021/03/09446965",
"title": "Maximum Probability Theorem: A Framework for Probabilistic Machine Learning",
"doi": null,
"abstractUrl": "/journal/ai/2021/03/09446965/1ua0q5D18Yg",
"parentPublication": {
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lics/2021/4895/0/09470552",
"title": "Compositional Semantics for Probabilistic Programs with Exact Conditioning",
"doi": null,
"abstractUrl": "/proceedings-article/lics/2021/09470552/1v2Qw1pe6AM",
"parentPublication": {
"id": "proceedings/lics/2021/4895/0",
"title": "2021 36th Annual ACM/IEEE Symposium on Logic in Computer Science (LICS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10005621",
"articleId": "1JF3Umx3TXy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10008084",
"articleId": "1JIoM5ABwoU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JIoMXrhsjK",
"name": "ttg555501-010007070s1-supp1-3231967.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010007070s1-supp1-3231967.pdf",
"extension": "pdf",
"size": "21.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JIoM5ABwoU",
"doi": "10.1109/TVCG.2023.3234337",
"abstract": "The use of good-quality data to inform decision making is entirely dependent on robust processes to ensure it is fit for purpose. Such processes vary between organisations, and between those tasked with designing and following them. In this paper we report on a survey of 53 data analysts from many industry sectors, 24 of whom also participated in in-depth interviews, about computational and visual methods for characterizing data and investigating data quality. The paper makes contributions in two key areas. The first is to data science fundamentals, because our lists of data profiling tasks and visualization techniques are more comprehensive than those published elsewhere. The second concerns the application question “what does good profiling look like to those who routinely perform it?,” which we answer by highlighting the diversity of profiling tasks, unusual practice and exemplars of visualization, and recommendations about formalizing processes and creating rulebooks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The use of good-quality data to inform decision making is entirely dependent on robust processes to ensure it is fit for purpose. Such processes vary between organisations, and between those tasked with designing and following them. In this paper we report on a survey of 53 data analysts from many industry sectors, 24 of whom also participated in in-depth interviews, about computational and visual methods for characterizing data and investigating data quality. The paper makes contributions in two key areas. The first is to data science fundamentals, because our lists of data profiling tasks and visualization techniques are more comprehensive than those published elsewhere. The second concerns the application question “what does good profiling look like to those who routinely perform it?,” which we answer by highlighting the diversity of profiling tasks, unusual practice and exemplars of visualization, and recommendations about formalizing processes and creating rulebooks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The use of good-quality data to inform decision making is entirely dependent on robust processes to ensure it is fit for purpose. Such processes vary between organisations, and between those tasked with designing and following them. In this paper we report on a survey of 53 data analysts from many industry sectors, 24 of whom also participated in in-depth interviews, about computational and visual methods for characterizing data and investigating data quality. The paper makes contributions in two key areas. The first is to data science fundamentals, because our lists of data profiling tasks and visualization techniques are more comprehensive than those published elsewhere. The second concerns the application question “what does good profiling look like to those who routinely perform it?,” which we answer by highlighting the diversity of profiling tasks, unusual practice and exemplars of visualization, and recommendations about formalizing processes and creating rulebooks.",
"title": "Tasks and Visualizations Used for Data Profiling: A Survey and Interview Study",
"normalizedTitle": "Tasks and Visualizations Used for Data Profiling: A Survey and Interview Study",
"fno": "10008084",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Task Analysis",
"Data Integrity",
"Interviews",
"Visualization",
"Bars",
"Industries",
"Data Profiling",
"Data Quality",
"Interview",
"Survey"
],
"authors": [
{
"givenName": "Roy A.",
"surname": "Ruddle",
"fullName": "Roy A. Ruddle",
"affiliation": "University of Leeds, Leeds, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "James",
"surname": "Cheshire",
"fullName": "James Cheshire",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sara Johansson",
"surname": "Fernstad",
"fullName": "Sara Johansson Fernstad",
"affiliation": "Newcastle University, U.K.",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2018/1424/0/142401a205",
"title": "Know Your Enemy: Identifying Quality Problems of Time Series Data",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2018/142401a205/12OmNqH9hoa",
"parentPublication": {
"id": "proceedings/pacificvis/2018/1424/0",
"title": "2018 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2017/5738/0/08031580",
"title": "Interaction+: Interaction enhancement for web-based visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2017/08031580/12OmNyQ7FJe",
"parentPublication": {
"id": "proceedings/pacificvis/2017/5738/0",
"title": "2017 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2016/2020/0/07498363",
"title": "Data profiling",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2016/07498363/12OmNzWfp1q",
"parentPublication": {
"id": "proceedings/icde/2016/2020/0",
"title": "2016 IEEE 32nd International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08354901",
"title": "Task-Based Effectiveness of Basic Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08354901/13rRUwd9CLU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/12/07369991",
"title": "The Elicitation Interview Technique: Capturing People's Experiences of Data Representations",
"doi": null,
"abstractUrl": "/journal/tg/2016/12/07369991/13rRUxBa5s2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/05/mcg2017050005",
"title": "Coming Into Focus: An Interview with Ellen Jantzen",
"doi": null,
"abstractUrl": "/magazine/cg/2017/05/mcg2017050005/13rRUyg2jNt",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440857",
"title": "Where's My Data? Evaluating Visualizations with Missing Data",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440857/17D45WZZ7Gl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2018/4235/0/08506578",
"title": "Comparative Visualizations through Parameterization and Variability",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2018/08506578/17D45WaTki5",
"parentPublication": {
"id": "proceedings/vlhcc/2018/4235/0",
"title": "2018 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/08930077",
"title": "Gaze-Driven Adaptive Interventions for Magazine-Style Narrative Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/08930077/1fCCO10cYW4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2019/5227/0/522700a084",
"title": "Comparing the Effectiveness of Visualizations of Different Data Distributions",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2019/522700a084/1fHloum4ISY",
"parentPublication": {
"id": "proceedings/sibgrapi/2019/5227/0",
"title": "2019 32nd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10007070",
"articleId": "1JH4iZQdoEU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10012127",
"articleId": "1JNmJ1M4N9e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JNmJfIGXlK",
"name": "ttg555501-010008084s1-tvcg-3234337-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010008084s1-tvcg-3234337-mm.zip",
"extension": "zip",
"size": "1.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JF3Umx3TXy",
"doi": "10.1109/TVCG.2022.3233900",
"abstract": "The haze in a scenario may affect the 360 photo/video quality and the immersive 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^{\\circ }$_Z</tex-math></inline-formula> virtual reality (VR) experience. The recent single image dehazing methods, to date, have been only focused on plane images. In this work, we propose a novel neural network pipeline for single omnidirectional image dehazing. To create the pipeline, we build the first hazy omnidirectional image dataset, which contains both synthetic and real-world samples. Then, we propose a new stripe sensitive convolution (SSConv) to handle the distortion problems due to the equirectangular projections. The SSConv calibrates distortion in two steps: 1) extracting features using different rectangular filters and, 2) learning to select the optimal features by a weighting of the feature stripes (a series of rows in the feature maps). Subsequently, using SSConv, we design an end-to-end network that jointly learns haze removal and depth estimation from a single omnidirectional image. The estimated depth map is leveraged as the intermediate representation and provides global context and geometric information to the dehazing module. Extensive experiments on challenging synthetic and real-world omnidirectional image datasets demonstrate the effectiveness of SSConv, and our network attains superior dehazing performance. The experiments on practical applications also demonstrate that our method can significantly improve the 3D object detection and 3D layout performances for hazy omnidirectional images.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The haze in a scenario may affect the 360 photo/video quality and the immersive 360<inline-formula><tex-math notation=\"LaTeX\">$^{\\circ }$</tex-math></inline-formula> virtual reality (VR) experience. The recent single image dehazing methods, to date, have been only focused on plane images. In this work, we propose a novel neural network pipeline for single omnidirectional image dehazing. To create the pipeline, we build the first hazy omnidirectional image dataset, which contains both synthetic and real-world samples. Then, we propose a new stripe sensitive convolution (SSConv) to handle the distortion problems due to the equirectangular projections. The SSConv calibrates distortion in two steps: 1) extracting features using different rectangular filters and, 2) learning to select the optimal features by a weighting of the feature stripes (a series of rows in the feature maps). Subsequently, using SSConv, we design an end-to-end network that jointly learns haze removal and depth estimation from a single omnidirectional image. The estimated depth map is leveraged as the intermediate representation and provides global context and geometric information to the dehazing module. Extensive experiments on challenging synthetic and real-world omnidirectional image datasets demonstrate the effectiveness of SSConv, and our network attains superior dehazing performance. The experiments on practical applications also demonstrate that our method can significantly improve the 3D object detection and 3D layout performances for hazy omnidirectional images.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The haze in a scenario may affect the 360 photo/video quality and the immersive 360- virtual reality (VR) experience. The recent single image dehazing methods, to date, have been only focused on plane images. In this work, we propose a novel neural network pipeline for single omnidirectional image dehazing. To create the pipeline, we build the first hazy omnidirectional image dataset, which contains both synthetic and real-world samples. Then, we propose a new stripe sensitive convolution (SSConv) to handle the distortion problems due to the equirectangular projections. The SSConv calibrates distortion in two steps: 1) extracting features using different rectangular filters and, 2) learning to select the optimal features by a weighting of the feature stripes (a series of rows in the feature maps). Subsequently, using SSConv, we design an end-to-end network that jointly learns haze removal and depth estimation from a single omnidirectional image. The estimated depth map is leveraged as the intermediate representation and provides global context and geometric information to the dehazing module. Extensive experiments on challenging synthetic and real-world omnidirectional image datasets demonstrate the effectiveness of SSConv, and our network attains superior dehazing performance. The experiments on practical applications also demonstrate that our method can significantly improve the 3D object detection and 3D layout performances for hazy omnidirectional images.",
"title": "Stripe Sensitive Convolution for Omnidirectional Image Dehazing",
"normalizedTitle": "Stripe Sensitive Convolution for Omnidirectional Image Dehazing",
"fno": "10005621",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Convolution",
"Distortion",
"Feature Extraction",
"Kernel",
"Estimation",
"Three Dimensional Displays",
"Layout",
"Omnidirectional Image Dehazing",
"Omnidirectional Image Depth Estimation",
"Stripe Sensitive Convolution",
"Virtual Reality"
],
"authors": [
{
"givenName": "Dong",
"surname": "Zhao",
"fullName": "Dong Zhao",
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jia",
"surname": "Li",
"fullName": "Jia Li",
"affiliation": "Peng Cheng Laboratory, Shenzhen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongyu",
"surname": "Li",
"fullName": "Hongyu Li",
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Long",
"surname": "Xu",
"fullName": "Long Xu",
"affiliation": "National Space Science Center, Chinese Academy of Sciences, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2018/6100/0/610000b033",
"title": "Image Dehazing by Joint Estimation of Transmittance and Airlight Using Bi-Directional Consistency Loss Minimized FCN",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000b033/17D45VUZMXH",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000a867",
"title": "O-HAZE: A Dehazing Benchmark with Real Hazy and Haze-Free Outdoor Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000a867/17D45Vw15sD",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000b004",
"title": "NTIRE 2018 Challenge on Image Dehazing: Methods and Results",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000b004/17D45X2fUFI",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c027",
"title": "Self-augmented Unpaired Image Dehazing via Density and Depth Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c027/1H0NQbECFwI",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600c241",
"title": "NTIRE 2019 Image Dehazing Challenge Report",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600c241/1iTvlRqD01W",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150828",
"title": "NTIRE 2020 Challenge on NonHomogeneous Dehazing",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150828/1lPH0VWdEAg",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150807",
"title": "NH-HAZE: An Image Dehazing Benchmark with Non-Homogeneous Hazy and Haze-Free Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150807/1lPH99bacAo",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150997",
"title": "Trident Dehazing Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150997/1lPHm6Y3MXe",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f930",
"title": "BidNet: Binocular Image Dehazing Without Explicit Disparity Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f930/1m3nHZOzXyM",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900a507",
"title": "Multi-Scale Selective Residual Learning for Non-Homogeneous Dehazing",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900a507/1yZ3Sx08wZq",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10005087",
"articleId": "1JC5yDf0E5q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10007070",
"articleId": "1JH4iZQdoEU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JH4iDuJiM0",
"name": "ttg555501-010005621s1-supp1-3233900.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010005621s1-supp1-3233900.pdf",
"extension": "pdf",
"size": "4.35 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JC5xZN3afu",
"doi": "10.1109/TVCG.2022.3233389",
"abstract": "Hybrid visualizations combine different metaphors into a single network layout, in order to help humans in finding the “right way” of displaying the different portions of the network, especially when it is globally sparse and locally dense. We investigate hybrid visualizations in two complementary directions: (i) On the one hand, we evaluate the effectiveness of different hybrid visualization models through a comparative user study; (ii) On the other hand, we estimate the usefulness of an interactive visualization that integrates all the considered hybrid models together. The results of our study provide some hints about the usefulness of the different hybrid visualizations for specific tasks of analysis and indicates that integrating different hybrid models into a single visualization may offer a valuable tool of analysis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Hybrid visualizations combine different metaphors into a single network layout, in order to help humans in finding the “right way” of displaying the different portions of the network, especially when it is globally sparse and locally dense. We investigate hybrid visualizations in two complementary directions: (i) On the one hand, we evaluate the effectiveness of different hybrid visualization models through a comparative user study; (ii) On the other hand, we estimate the usefulness of an interactive visualization that integrates all the considered hybrid models together. The results of our study provide some hints about the usefulness of the different hybrid visualizations for specific tasks of analysis and indicates that integrating different hybrid models into a single visualization may offer a valuable tool of analysis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Hybrid visualizations combine different metaphors into a single network layout, in order to help humans in finding the “right way” of displaying the different portions of the network, especially when it is globally sparse and locally dense. We investigate hybrid visualizations in two complementary directions: (i) On the one hand, we evaluate the effectiveness of different hybrid visualization models through a comparative user study; (ii) On the other hand, we estimate the usefulness of an interactive visualization that integrates all the considered hybrid models together. The results of our study provide some hints about the usefulness of the different hybrid visualizations for specific tasks of analysis and indicates that integrating different hybrid models into a single visualization may offer a valuable tool of analysis.",
"title": "Comparative Study and Evaluation of Hybrid Visualizations of Graphs",
"normalizedTitle": "Comparative Study and Evaluation of Hybrid Visualizations of Graphs",
"fno": "10004748",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Task Analysis",
"Analytical Models",
"Layout",
"Computational Modeling",
"Social Networking Online",
"Sparse Matrices",
"Network Visualization",
"Hybrid Visualizations",
"Evaluation",
"User Study"
],
"authors": [
{
"givenName": "Emilio Di",
"surname": "Giacomo",
"fullName": "Emilio Di Giacomo",
"affiliation": "Department of Engineering, University of Perugia, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Walter",
"surname": "Didimo",
"fullName": "Walter Didimo",
"affiliation": "Department of Engineering, University of Perugia, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Giuseppe",
"surname": "Liotta",
"fullName": "Giuseppe Liotta",
"affiliation": "Department of Engineering, University of Perugia, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fabrizio",
"surname": "Montecchiani",
"fullName": "Fabrizio Montecchiani",
"affiliation": "Department of Engineering, University of Perugia, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alessandra",
"surname": "Tappini",
"fullName": "Alessandra Tappini",
"affiliation": "Department of Engineering, University of Perugia, Italy",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2010/6685/0/05429591",
"title": "Visual analysis of large graphs using (X,Y)-clustering and hybrid visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2010/05429591/12OmNBkxsuy",
"parentPublication": {
"id": "proceedings/pacificvis/2010/6685/0",
"title": "2010 IEEE Pacific Visualization Symposium (PacificVis 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2011/0868/0/06004064",
"title": "Listening to Managers: A Study about Visualizations in Corporate Presentations",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2011/06004064/12OmNqBbHF8",
"parentPublication": {
"id": "proceedings/iv/2011/0868/0",
"title": "2011 15th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07536142",
"title": "Exploring the Possibilities of Embedding Heterogeneous Data Attributes in Familiar Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07536142/13rRUEgarjx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/v1302",
"title": "NodeTrix: a Hybrid Visualization of Social Networks",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/v1302/13rRUyYjKa7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08233127",
"title": "Atom: A Grammar for Unit Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08233127/14H4WLzSYsE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2018/4235/0/08506578",
"title": "Comparative Visualizations through Parameterization and Variability",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2018/08506578/17D45WaTki5",
"parentPublication": {
"id": "proceedings/vlhcc/2018/4235/0",
"title": "2018 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vizsec/2018/8194/0/08709181",
"title": "An Empirical Study on Perceptually Masking Privacy in Graph Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/vizsec/2018/08709181/19ZL2eLPBfO",
"parentPublication": {
"id": "proceedings/vizsec/2018/8194/0",
"title": "2018 IEEE Symposium on Visualization for Cyber Security (VizSec)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09916137",
"title": "Revisiting the Design Patterns of Composite Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09916137/1HojAjSAGNq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09165928",
"title": "Hybrid Graph Visualizations With ChordLink: Algorithms, Experiments, and Applications",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09165928/1mevWoz3hM4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552938",
"title": "Communicating Visualizations without Visuals: Investigation of Visualization Alternative Text for People with Visual Impairments",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552938/1xjQYJDwaxa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10002893",
"articleId": "1Jv6oNxHXqM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10005035",
"articleId": "1JC5yiVyrXa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JDp0x5xWH6",
"name": "ttg555501-010004748s1-supp1-3233389.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010004748s1-supp1-3233389.pdf",
"extension": "pdf",
"size": "9.44 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JC5yiVyrXa",
"doi": "10.1109/TVCG.2022.3233548",
"abstract": "Neurons have a polarized structure, with dendrites and axons, and compartment-specific functions can be affected by the dwelling mitochondria. Recent studies have shown that the morphology of mitochondria is closely related to the functions of neurons and neurodegenerative diseases. However, the conventional mitochondria analysis workflow mainly relies on manual annotations and generic image-processing software. Moreover, even though there have been recent developments in automatic mitochondria analysis using deep learning, the application of existing methods in a daily analysis remains challenging because the performance of a pretrained deep learning model can vary depending on the target data, and there are always errors in inference time, requiring human proofreading. To address these issues, we introduce <monospace>MitoVis</monospace>, a novel visualization system for end-to-end data processing and an interactive analysis of the morphology of neuronal mitochondria. <monospace>MitoVis</monospace> introduces a novel active learning framework based on recent contrastive learning, which allows accurate fine-tuning of the neural network model. <monospace>MitoVis</monospace> also provides novel visual guides for interactive proofreading so that users can quickly identify and correct errors in the result with minimal effort. We demonstrate the usefulness and efficacy of the system via case studies conducted by neuroscientists. The results show that <monospace>MitoVis</monospace> achieved up to 13.3× faster total analysis time in the case study compared to the conventional manual analysis workflow.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Neurons have a polarized structure, with dendrites and axons, and compartment-specific functions can be affected by the dwelling mitochondria. Recent studies have shown that the morphology of mitochondria is closely related to the functions of neurons and neurodegenerative diseases. However, the conventional mitochondria analysis workflow mainly relies on manual annotations and generic image-processing software. Moreover, even though there have been recent developments in automatic mitochondria analysis using deep learning, the application of existing methods in a daily analysis remains challenging because the performance of a pretrained deep learning model can vary depending on the target data, and there are always errors in inference time, requiring human proofreading. To address these issues, we introduce <monospace>MitoVis</monospace>, a novel visualization system for end-to-end data processing and an interactive analysis of the morphology of neuronal mitochondria. <monospace>MitoVis</monospace> introduces a novel active learning framework based on recent contrastive learning, which allows accurate fine-tuning of the neural network model. <monospace>MitoVis</monospace> also provides novel visual guides for interactive proofreading so that users can quickly identify and correct errors in the result with minimal effort. We demonstrate the usefulness and efficacy of the system via case studies conducted by neuroscientists. The results show that <monospace>MitoVis</monospace> achieved up to 13.3× faster total analysis time in the case study compared to the conventional manual analysis workflow.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Neurons have a polarized structure, with dendrites and axons, and compartment-specific functions can be affected by the dwelling mitochondria. Recent studies have shown that the morphology of mitochondria is closely related to the functions of neurons and neurodegenerative diseases. However, the conventional mitochondria analysis workflow mainly relies on manual annotations and generic image-processing software. Moreover, even though there have been recent developments in automatic mitochondria analysis using deep learning, the application of existing methods in a daily analysis remains challenging because the performance of a pretrained deep learning model can vary depending on the target data, and there are always errors in inference time, requiring human proofreading. To address these issues, we introduce MitoVis, a novel visualization system for end-to-end data processing and an interactive analysis of the morphology of neuronal mitochondria. MitoVis introduces a novel active learning framework based on recent contrastive learning, which allows accurate fine-tuning of the neural network model. MitoVis also provides novel visual guides for interactive proofreading so that users can quickly identify and correct errors in the result with minimal effort. We demonstrate the usefulness and efficacy of the system via case studies conducted by neuroscientists. The results show that MitoVis achieved up to 13.3× faster total analysis time in the case study compared to the conventional manual analysis workflow.",
"title": "MitoVis: A Unified Visual Analytics System for End-to-End Neuronal Mitochondria Analysis",
"normalizedTitle": "MitoVis: A Unified Visual Analytics System for End-to-End Neuronal Mitochondria Analysis",
"fno": "10005035",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Morphology",
"Neurons",
"Microscopy",
"Dendrites Neurons",
"Deep Learning",
"Axons",
"Biomedical And Medical Visualization",
"Intelligence Analysis",
"Machine Learning",
"Task And Requirements Analysis",
"User Interfaces"
],
"authors": [
{
"givenName": "JunYoung",
"surname": "Choi",
"fullName": "JunYoung Choi",
"affiliation": "Korea University, Seoul, Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hyun-Jic",
"surname": "Oh",
"fullName": "Hyun-Jic Oh",
"affiliation": "Korea University, Seoul, Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hakjun",
"surname": "Lee",
"fullName": "Hakjun Lee",
"affiliation": "Korea University, Seoul, Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Suyeon",
"surname": "Kim",
"fullName": "Suyeon Kim",
"affiliation": "Korea University, Seoul, Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Seok-Kyu",
"surname": "Kwon",
"fullName": "Seok-Kyu Kwon",
"affiliation": "Korea University, Seoul, Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Won-Ki",
"surname": "Jeong",
"fullName": "Won-Ki Jeong",
"affiliation": "Korea University, Seoul, Korea",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2010/7029/0/05543594",
"title": "Radon-Like features and their application to connectomics",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543594/12OmNyPQ4Ci",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2015/7568/0/7568a316",
"title": "Detecting Criminal Relationships through SOM Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a316/12OmNzC5ToI",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017643",
"title": "Abstractocyte: A Visual Tool for Exploring Nanoscale Astroglial Cells",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017643/13rRUwI5U7Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192653",
"title": "NeuroBlocks – Visual Tracking of Segmentation and Proofreading for Large Connectomics Projects",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192653/13rRUwh80uC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440805",
"title": "Visualization of Neuronal Structures in Wide-Field Microscopy Brain Images",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440805/17D45WnnFUX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2021/5841/0/584100b176",
"title": "Impact of Patch Extraction Variables on Histopathological Imagery Classification Using Convolution Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2021/584100b176/1EpLxSXoQ5a",
"parentPublication": {
"id": "proceedings/csci/2021/5841/0",
"title": "2021 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600b999",
"title": "A Morphology Focused Diffusion Probabilistic Model for Synthesis of Histopathology Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600b999/1La4IB9ITaE",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2022/7729/0/10092238",
"title": "Axon and Myelin Sheath Segmentation in Electron Microscopy Images using Meta Learning",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2022/10092238/1MepMj5HXsQ",
"parentPublication": {
"id": "proceedings/aipr/2022/7729/0",
"title": "2022 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150739",
"title": "A Topological Nomenclature for 3D Shape Analysis in Connectomics",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150739/1lPHt0H0q0E",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tase/2020/4086/0/408600a073",
"title": "Feature-oriented Design of Visual Analytics System for Interpretable Deep Learning based Intrusion Detection",
"doi": null,
"abstractUrl": "/proceedings-article/tase/2020/408600a073/1t0HAB8lCE0",
"parentPublication": {
"id": "proceedings/tase/2020/4086/0",
"title": "2020 International Symposium on Theoretical Aspects of Software Engineering (TASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10004748",
"articleId": "1JC5xZN3afu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10005087",
"articleId": "1JC5yDf0E5q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JDp0Xi2i6k",
"name": "ttg555501-010005035s1-supp1-3233548.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010005035s1-supp1-3233548.mp4",
"extension": "mp4",
"size": "199 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JC5yDf0E5q",
"doi": "10.1109/TVCG.2022.3233287",
"abstract": "Recent works in graph visualization attempt to reduce the runtime of <italic>repulsion</italic> force computation of force-directed algorithms using sampling. However, they fail to reduce the runtime for <italic>attraction</italic> force computation to sublinear in the number of edges. We present the <monospace>SubLinearForce</monospace> framework for a fully sublinear-time <italic>force computation</italic> algorithm for drawing large complex graphs. More precisely, we present new sublinear-time algorithms for the <italic>attraction force</italic> computation of force-directed algorithms. We then integrate them with sublinear-time repulsion force computation to give a fully sublinear-time force computation. Extensive experiments show that our algorithms compute layouts on average 80% faster than the existing linear-time force computation algorithm, while obtaining significantly better quality metrics such as edge crossing and shape-based metrics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent works in graph visualization attempt to reduce the runtime of <italic>repulsion</italic> force computation of force-directed algorithms using sampling. However, they fail to reduce the runtime for <italic>attraction</italic> force computation to sublinear in the number of edges. We present the <monospace>SubLinearForce</monospace> framework for a fully sublinear-time <italic>force computation</italic> algorithm for drawing large complex graphs. More precisely, we present new sublinear-time algorithms for the <italic>attraction force</italic> computation of force-directed algorithms. We then integrate them with sublinear-time repulsion force computation to give a fully sublinear-time force computation. Extensive experiments show that our algorithms compute layouts on average 80% faster than the existing linear-time force computation algorithm, while obtaining significantly better quality metrics such as edge crossing and shape-based metrics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent works in graph visualization attempt to reduce the runtime of repulsion force computation of force-directed algorithms using sampling. However, they fail to reduce the runtime for attraction force computation to sublinear in the number of edges. We present the SubLinearForce framework for a fully sublinear-time force computation algorithm for drawing large complex graphs. More precisely, we present new sublinear-time algorithms for the attraction force computation of force-directed algorithms. We then integrate them with sublinear-time repulsion force computation to give a fully sublinear-time force computation. Extensive experiments show that our algorithms compute layouts on average 80% faster than the existing linear-time force computation algorithm, while obtaining significantly better quality metrics such as edge crossing and shape-based metrics.",
"title": "SubLinearForce: Fully Sublinear-Time Force Computation for Large Complex Graph Drawing",
"normalizedTitle": "SubLinearForce: Fully Sublinear-Time Force Computation for Large Complex Graph Drawing",
"fno": "10005087",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Force",
"Runtime",
"Measurement",
"Resistance",
"Approximation Algorithms",
"Springs",
"Scalability",
"Graph Drawing",
"Force Directed Algorithms",
"Sublinear Time Algorithms"
],
"authors": [
{
"givenName": "Amyra",
"surname": "Meidiana",
"fullName": "Amyra Meidiana",
"affiliation": "University of Sydney, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Seok-Hee",
"surname": "Hong",
"fullName": "Seok-Hee Hong",
"affiliation": "University of Sydney, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shijun",
"surname": "Cai",
"fullName": "Shijun Cai",
"affiliation": "University of Sydney, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marnijati",
"surname": "Torkel",
"fullName": "Marnijati Torkel",
"affiliation": "University of Sydney, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Eades",
"fullName": "Peter Eades",
"affiliation": "University of Sydney, Australia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2005/2397/0/23970329",
"title": "A New Force-Directed Graph Drawing Method Based on Edge-Edge Repulsion",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2005/23970329/12OmNCwCLry",
"parentPublication": {
"id": "proceedings/iv/2005/2397/0",
"title": "Ninth International Conference on Information Visualisation (IV'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dac/1987/0781/0/01586315",
"title": "Heuristic Acceleration of Force-Directed Placement",
"doi": null,
"abstractUrl": "/proceedings-article/dac/1987/01586315/12OmNwvDQuj",
"parentPublication": {
"id": "proceedings/dac/1987/0781/0",
"title": "24th ACM/IEEE Design Automation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/03/07556272",
"title": "Vibrotactile Compliance Feedback for Tangential Force Interaction",
"doi": null,
"abstractUrl": "/journal/th/2017/03/07556272/13rRUwcS1D9",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/01/06674293",
"title": "Grip Force Control during Virtual Object Interaction: Effect of Force Feedback, Accuracy Demands, and Training",
"doi": null,
"abstractUrl": "/journal/th/2014/01/06674293/13rRUxAASW5",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08606273",
"title": "Penalty Force for Coupling Materials with Coulomb Friction",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08606273/17D45WB0qbq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09881908",
"title": "PropelWalker: A Leg-Based Wearable System With Propeller-Based Force Feedback for Walking in Fluids in VR",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09881908/1Gv909WpCG4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10024360",
"title": "Force-Directed Graph Layouts Revisited: A New Force Based on the T-Distribution",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10024360/1KaBabqZxSg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2019/2605/0/08944364",
"title": "Force-Directed Graph Layouts by Edge Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2019/08944364/1grOFicLl9S",
"parentPublication": {
"id": "proceedings/ldav/2019/2605/0",
"title": "2019 IEEE 9th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a096",
"title": "Accelerating Force-Directed Graph Drawing with RT Cores",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a096/1qROE1kZkek",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2021/3931/0/393100a146",
"title": "Sublinear-Time Attraction Force Computation for Large Complex Graph Drawing",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2021/393100a146/1tTtrX8Ij72",
"parentPublication": {
"id": "proceedings/pacificvis/2021/3931/0",
"title": "2021 IEEE 14th Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10005035",
"articleId": "1JC5yiVyrXa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10005621",
"articleId": "1JF3Umx3TXy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Jv6oNxHXqM",
"doi": "10.1109/TVCG.2022.3232959",
"abstract": "A viewer's existing beliefs can prevent accurate reasoning with data visualizations. In particular, confirmation bias can cause people to overweigh information that confirms their beliefs, and dismiss information that disconfirms them. We tested whether confirmation bias exists when people reason with visualized data and whether certain visualization designs can elicit less biased reasoning strategies. We asked crowdworkers to solve reasoning problems that had the potential to evoke both poor reasoning strategies and confirmation bias. We created two scenarios, one in which we primed people with a belief before asking them to make a decision, and another in which people held pre-existing beliefs. The data was presented as either a table, a bar table, or a bar chart. To correctly solve the problem, participants should use a complex reasoning strategy to compare two ratios, each between two pairs of values. But participants could also be tempted to use simpler, superficial heuristics, shortcuts, or biased strategies to reason about the problem. Presenting the data in a table format helped participants reason with the correct ratio strategy while showing the data as a bar table or a bar chart led participants towards incorrect heuristics. Confirmation bias was not significantly present when beliefs were primed, but it was present when beliefs were pre-existing. Additionally, the table presentation format was more likely to afford the ratio reasoning strategy, and the use of ratio strategy was more likely to lead to the correct answer. These findings suggest that data presentation formats can affect affordances for reasoning.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A viewer's existing beliefs can prevent accurate reasoning with data visualizations. In particular, confirmation bias can cause people to overweigh information that confirms their beliefs, and dismiss information that disconfirms them. We tested whether confirmation bias exists when people reason with visualized data and whether certain visualization designs can elicit less biased reasoning strategies. We asked crowdworkers to solve reasoning problems that had the potential to evoke both poor reasoning strategies and confirmation bias. We created two scenarios, one in which we primed people with a belief before asking them to make a decision, and another in which people held pre-existing beliefs. The data was presented as either a table, a bar table, or a bar chart. To correctly solve the problem, participants should use a complex reasoning strategy to compare two ratios, each between two pairs of values. But participants could also be tempted to use simpler, superficial heuristics, shortcuts, or biased strategies to reason about the problem. Presenting the data in a table format helped participants reason with the correct ratio strategy while showing the data as a bar table or a bar chart led participants towards incorrect heuristics. Confirmation bias was not significantly present when beliefs were primed, but it was present when beliefs were pre-existing. Additionally, the table presentation format was more likely to afford the ratio reasoning strategy, and the use of ratio strategy was more likely to lead to the correct answer. These findings suggest that data presentation formats can affect affordances for reasoning.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A viewer's existing beliefs can prevent accurate reasoning with data visualizations. In particular, confirmation bias can cause people to overweigh information that confirms their beliefs, and dismiss information that disconfirms them. We tested whether confirmation bias exists when people reason with visualized data and whether certain visualization designs can elicit less biased reasoning strategies. We asked crowdworkers to solve reasoning problems that had the potential to evoke both poor reasoning strategies and confirmation bias. We created two scenarios, one in which we primed people with a belief before asking them to make a decision, and another in which people held pre-existing beliefs. The data was presented as either a table, a bar table, or a bar chart. To correctly solve the problem, participants should use a complex reasoning strategy to compare two ratios, each between two pairs of values. But participants could also be tempted to use simpler, superficial heuristics, shortcuts, or biased strategies to reason about the problem. Presenting the data in a table format helped participants reason with the correct ratio strategy while showing the data as a bar table or a bar chart led participants towards incorrect heuristics. Confirmation bias was not significantly present when beliefs were primed, but it was present when beliefs were pre-existing. Additionally, the table presentation format was more likely to afford the ratio reasoning strategy, and the use of ratio strategy was more likely to lead to the correct answer. These findings suggest that data presentation formats can affect affordances for reasoning.",
"title": "Reasoning Affordances with Tables and Bar Charts",
"normalizedTitle": "Reasoning Affordances with Tables and Bar Charts",
"fno": "10002893",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Cognition",
"Urban Areas",
"Bars",
"Skin",
"Affordances",
"Weapons",
"Data Visualization",
"Tabular Displays",
"Empirical Evaluation",
"Reasoning"
],
"authors": [
{
"givenName": "Cindy",
"surname": "Xiong",
"fullName": "Cindy Xiong",
"affiliation": "University of Massachusetts, Amherst, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Elsie",
"surname": "Lee-Robbins",
"fullName": "Elsie Lee-Robbins",
"affiliation": "University of Michigan, Ann Arbor, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Icy",
"surname": "Zhang",
"fullName": "Icy Zhang",
"affiliation": "University of California Los Angeles, Los Angeles, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aimen",
"surname": "Gaba",
"fullName": "Aimen Gaba",
"affiliation": "University of Massachusetts, Amherst, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steven",
"surname": "Franconeri",
"fullName": "Steven Franconeri",
"affiliation": "Northwestern University, Evanston, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iiai-aai/2015/9957/0/07373871",
"title": "Evidential Reasoning in Annotated Logics",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2015/07373871/12OmNBrV1N8",
"parentPublication": {
"id": "proceedings/iiai-aai/2015/9957/0",
"title": "2015 IIAI 4th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2018/2666/1/266601a787",
"title": "Possibilistic Reasoning About Actions in Agent Systems",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2018/266601a787/144U9b07hJQ",
"parentPublication": {
"id": "proceedings/compsac/2018/2666/2",
"title": "2018 IEEE 42nd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lics/2016/4391/0/08576427",
"title": "Hybrid realizability for intuitionistic and classical choice",
"doi": null,
"abstractUrl": "/proceedings-article/lics/2016/08576427/17D45VTRov8",
"parentPublication": {
"id": "proceedings/lics/2016/4391/0",
"title": "2016 31st Annual ACM/IEEE Symposium on Logic in Computer Science (LICS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a159",
"title": "Improving Perception Accuracy in Bar Charts with Internal Contrast and Framing Enhancements",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a159/17D45WnnFWc",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904487",
"title": "Studying Early Decision Making with Progressive Bar Charts",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904487/1H1geE4olvG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a067",
"title": "An Overview of the Design and Development for Dynamic and Physical Bar Charts",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a067/1KaH61BvDWw",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a163",
"title": "Proposal and Evaluation of Textual Description Templates for Bar Charts Vocalization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a163/1cMFc4aDtWo",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a151",
"title": "The Cost of Pie Charts",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a151/1cMFcqwGM5q",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lics/2021/4895/0/09470523",
"title": "On the logical structure of choice and bar induction principles",
"doi": null,
"abstractUrl": "/proceedings-article/lics/2021/09470523/1v2QqZ6uDNS",
"parentPublication": {
"id": "proceedings/lics/2021/4895/0",
"title": "2021 36th Annual ACM/IEEE Symposium on Logic in Computer Science (LICS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552208",
"title": "Visual Arrangements of Bar Charts Influence Comparisons in Viewer Takeaways",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552208/1xibWU97C8w",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10003102",
"articleId": "1Jv6onSqGf6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10004748",
"articleId": "1JC5xZN3afu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1K3XCVYUmw8",
"name": "ttg555501-010002893s1-tvcg-3232959-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010002893s1-tvcg-3232959-mm.zip",
"extension": "zip",
"size": "90.9 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Jv6onSqGf6",
"doi": "10.1109/TVCG.2022.3231230",
"abstract": "We introduce a conceptual model for scalability designed for visualization research. With this model, we systematically analyze over 120 visualization publications from 1990 to 2020 to characterize the different notions of scalability in these works. While many papers have addressed scalability issues, our survey identifies a lack of consistency in the use of the term in the visualization research community. We address this issue by introducing a consistent terminology meant to help visualization researchers better characterize the scalability aspects in their research. It also helps in providing multiple methods for supporting the claim that a work is “scalable.” Our model is centered around an effort function with inputs and outputs. The inputs are the problem size and resources, whereas the outputs are the actual efforts, for instance, in terms of computational run time or visual clutter. We select representative examples to illustrate different approaches and facets of what scalability can mean in visualization literature. Finally, targeting the diverse crowd of visualization researchers without a scalability tradition, we provide a set of recommendations for how scalability can be presented in a clear and consistent way to improve fair comparison between visualization techniques and systems and foster reproducibility.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce a conceptual model for scalability designed for visualization research. With this model, we systematically analyze over 120 visualization publications from 1990 to 2020 to characterize the different notions of scalability in these works. While many papers have addressed scalability issues, our survey identifies a lack of consistency in the use of the term in the visualization research community. We address this issue by introducing a consistent terminology meant to help visualization researchers better characterize the scalability aspects in their research. It also helps in providing multiple methods for supporting the claim that a work is “scalable.” Our model is centered around an effort function with inputs and outputs. The inputs are the problem size and resources, whereas the outputs are the actual efforts, for instance, in terms of computational run time or visual clutter. We select representative examples to illustrate different approaches and facets of what scalability can mean in visualization literature. Finally, targeting the diverse crowd of visualization researchers without a scalability tradition, we provide a set of recommendations for how scalability can be presented in a clear and consistent way to improve fair comparison between visualization techniques and systems and foster reproducibility.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce a conceptual model for scalability designed for visualization research. With this model, we systematically analyze over 120 visualization publications from 1990 to 2020 to characterize the different notions of scalability in these works. While many papers have addressed scalability issues, our survey identifies a lack of consistency in the use of the term in the visualization research community. We address this issue by introducing a consistent terminology meant to help visualization researchers better characterize the scalability aspects in their research. It also helps in providing multiple methods for supporting the claim that a work is “scalable.” Our model is centered around an effort function with inputs and outputs. The inputs are the problem size and resources, whereas the outputs are the actual efforts, for instance, in terms of computational run time or visual clutter. We select representative examples to illustrate different approaches and facets of what scalability can mean in visualization literature. Finally, targeting the diverse crowd of visualization researchers without a scalability tradition, we provide a set of recommendations for how scalability can be presented in a clear and consistent way to improve fair comparison between visualization techniques and systems and foster reproducibility.",
"title": "Scalability in Visualization",
"normalizedTitle": "Scalability in Visualization",
"fno": "10003102",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Scalability",
"Visualization",
"Computational Modeling",
"Encoding",
"Psychology",
"Human Computer Interaction",
"Computer Science",
"Scalability",
"Visualization",
"Structured Literature Analysis",
"Conceptual Framework"
],
"authors": [
{
"givenName": "Gaëlle",
"surname": "Richer",
"fullName": "Gaëlle Richer",
"affiliation": "Université Paris-Saclay, CNRS, Inria, LISN, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexis",
"surname": "Pister",
"fullName": "Alexis Pister",
"affiliation": "Université Paris-Saclay, CNRS, Inria, LISN, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Moataz",
"surname": "Abdelaal",
"fullName": "Moataz Abdelaal",
"affiliation": "University of Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jean-Daniel",
"surname": "Fekete",
"fullName": "Jean-Daniel Fekete",
"affiliation": "Université Paris-Saclay, CNRS, Inria, LISN, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Sedlmair",
"fullName": "Michael Sedlmair",
"affiliation": "University of Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel",
"surname": "Weiskopf",
"fullName": "Daniel Weiskopf",
"affiliation": "University of Stuttgart, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ficloud/2014/4357/0/4357a497",
"title": "Scalar: Systematic Scalability Analysis with the Universal Scalability Law",
"doi": null,
"abstractUrl": "/proceedings-article/ficloud/2014/4357a497/12OmNB0X8vc",
"parentPublication": {
"id": "proceedings/ficloud/2014/4357/0",
"title": "2014 2nd International Conference on Future Internet of Things and Cloud (FiCloud)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2011/4458/0/4458a456",
"title": "A New Interface for Large Scale Tiled Display System Considering Scalability",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2011/4458a456/12OmNBU1jQj",
"parentPublication": {
"id": "proceedings/nbis/2011/4458/0",
"title": "2011 14th International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2012/06/mcg2012060088",
"title": "Understanding Visualization by Understanding Individual Users",
"doi": null,
"abstractUrl": "/magazine/cg/2012/06/mcg2012060088/13rRUNvya3t",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2008/02/tth2008020130",
"title": "Using Haptics to Convey Cause-and-Effect Relations in Climate Visualization",
"doi": null,
"abstractUrl": "/journal/th/2008/02/tth2008020130/13rRUwIF6dX",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v0837",
"title": "The Perceptual Scalability of Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v0837/13rRUwjXZS3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122106",
"title": "WYSIWYG (What You See is What You Get) Volume Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122106/13rRUxCitJa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192692",
"title": "VEEVVIE: Visual Explorer for Empirical Visualization, VR and Interaction Experiments",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192692/13rRUyeTVi5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/beliv/2018/6884/0/08634261",
"title": "Towards Designing Unbiased Replication Studies in Information Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/beliv/2018/08634261/17D45WWzW4f",
"parentPublication": {
"id": "proceedings/beliv/2018/6884/0",
"title": "2018 IEEE Evaluation and Beyond - Methodological Approaches for Visualization (BELIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805424",
"title": "What is Interaction for Data Visualization?",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805424/1cG4MsovTO0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809850",
"title": "Discriminability Tests for Visualization Effectiveness and Scalability",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809850/1cHEkrFpU76",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10002316",
"articleId": "1JtvHc3BND2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10002893",
"articleId": "1Jv6oNxHXqM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JtvHc3BND2",
"doi": "10.1109/TVCG.2022.3232591",
"abstract": "We present experimental results to explore a form of bivariate glyphs for representing large-magnitude-range vectors. The glyphs meet two conditions: (1) two visual dimensions are separable; and (2) one of the two visual dimensions uses a categorical representation (e.g., a categorical colormap). We evaluate how much these two conditions determine the bivariate glyphs' effectiveness. The first experiment asks participants to perform three local tasks requiring reading no more than two glyphs. The second experiment scales up the search space in global tasks when participants must look at the entire scene of hundreds of vector glyphs to get an answer. Our results support that the first condition is necessary for local tasks when a few items are compared. But it is not enough for understanding a large amount of data. The second condition is necessary for perceiving global structures of examining very complex datasets. Participants' comments reveal that the categorical features in the bivariate glyphs trigger emergent optimal viewers' behaviors. This work contributes to perceptually accurate glyph representations for revealing patterns from large scientific results. We release source code, quantum physics data, training documents, participants' answers, and statistical analyses for reproducible science at <uri>https : //osf:io/4xcf5/?viewonly = 94123139df9c4ac984a1e0df811cd580</uri>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present experimental results to explore a form of bivariate glyphs for representing large-magnitude-range vectors. The glyphs meet two conditions: (1) two visual dimensions are separable; and (2) one of the two visual dimensions uses a categorical representation (e.g., a categorical colormap). We evaluate how much these two conditions determine the bivariate glyphs' effectiveness. The first experiment asks participants to perform three local tasks requiring reading no more than two glyphs. The second experiment scales up the search space in global tasks when participants must look at the entire scene of hundreds of vector glyphs to get an answer. Our results support that the first condition is necessary for local tasks when a few items are compared. But it is not enough for understanding a large amount of data. The second condition is necessary for perceiving global structures of examining very complex datasets. Participants' comments reveal that the categorical features in the bivariate glyphs trigger emergent optimal viewers' behaviors. This work contributes to perceptually accurate glyph representations for revealing patterns from large scientific results. We release source code, quantum physics data, training documents, participants' answers, and statistical analyses for reproducible science at <uri>https : //osf:io/4xcf5/?viewonly = 94123139df9c4ac984a1e0df811cd580</uri>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present experimental results to explore a form of bivariate glyphs for representing large-magnitude-range vectors. The glyphs meet two conditions: (1) two visual dimensions are separable; and (2) one of the two visual dimensions uses a categorical representation (e.g., a categorical colormap). We evaluate how much these two conditions determine the bivariate glyphs' effectiveness. The first experiment asks participants to perform three local tasks requiring reading no more than two glyphs. The second experiment scales up the search space in global tasks when participants must look at the entire scene of hundreds of vector glyphs to get an answer. Our results support that the first condition is necessary for local tasks when a few items are compared. But it is not enough for understanding a large amount of data. The second condition is necessary for perceiving global structures of examining very complex datasets. Participants' comments reveal that the categorical features in the bivariate glyphs trigger emergent optimal viewers' behaviors. This work contributes to perceptually accurate glyph representations for revealing patterns from large scientific results. We release source code, quantum physics data, training documents, participants' answers, and statistical analyses for reproducible science at https : //osf:io/4xcf5/?viewonly = 94123139df9c4ac984a1e0df811cd580.",
"title": "Evaluating Glyph Design for Showing Large-Magnitude-Range Quantum Spins",
"normalizedTitle": "Evaluating Glyph Design for Showing Large-Magnitude-Range Quantum Spins",
"fno": "10002316",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Image Color Analysis",
"Visualization",
"Three Dimensional Displays",
"Data Visualization",
"Behavioral Sciences",
"Shape",
"Separable And Integral Dimension Pairs",
"Bivariate Glyph",
"3 D Glyph",
"Quantitative Visualization",
"Large Magnitude Range"
],
"authors": [
{
"givenName": "Henan",
"surname": "Zhao",
"fullName": "Henan Zhao",
"affiliation": "University of Maryland, Baltimore, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Garnett W.",
"surname": "Bryant",
"fullName": "Garnett W. Bryant",
"affiliation": "National Institute of Standards and Technology, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wesley",
"surname": "Griffin",
"fullName": "Wesley Griffin",
"affiliation": "Stellar Science, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Judith E.",
"surname": "Terrill",
"fullName": "Judith E. Terrill",
"affiliation": "National Institute of Standards and Technology, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Chen",
"fullName": "Jian Chen",
"affiliation": "Ohio State University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-20",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2015/6879/0/07156363",
"title": "Applying feature integration theory to glyph-based information visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156363/12OmNxYbT4i",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/10/08078198",
"title": "Cartogram Visualization for Bivariate Geo-Statistical Data",
"doi": null,
"abstractUrl": "/journal/tg/2018/10/08078198/13rRUx0xPZE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2022/6803/0/680300a642",
"title": "Examining Users' Continuous Use Intention of AI-Enabled Online Education Applications",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2022/680300a642/1FUVFTiMK3e",
"parentPublication": {
"id": "proceedings/icceai/2022/6803/0",
"title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09905997",
"title": "Unifying Effects of Direct and Relational Associations for Visual Communication",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09905997/1H3ZWHY73by",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09930144",
"title": "Out of the Plane: Flower Vs. Star Glyphs to Support High-Dimensional Exploration in Two-Dimensional Embeddings",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09930144/1HMOX2J2VMY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09963914",
"title": "Exploring the Function of Expressions in Negotiation: the DyNego-WOZ Corpus",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09963914/1Iz0JTbEyaY",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ica/2022/6936/0/693600a048",
"title": "Improvement of Proactive Attitude by Alternating Enhancement of a Sense of Acceptance and Control",
"doi": null,
"abstractUrl": "/proceedings-article/ica/2022/693600a048/1JvaJz64Tcs",
"parentPublication": {
"id": "proceedings/ica/2022/6936/0",
"title": "2022 IEEE International Conference on Agents (ICA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10021888",
"title": "Continuous Scatterplot Operators for Bivariate Analysis and Study of Electronic Transitions",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10021888/1K3XDZ8pUAg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2022/4609/0/460900a163",
"title": "Interpreting Categorical Data Classifiers using Explanation-based Locality",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2022/460900a163/1KBr42Oepfa",
"parentPublication": {
"id": "proceedings/icdmw/2022/4609/0",
"title": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933656",
"title": "Evaluating Ordering Strategies of Star Glyph Axes",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933656/1fTgJ3IVtjq",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09999502",
"articleId": "1JrMCoX7Xpe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10002893",
"articleId": "1Jv6oNxHXqM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Jv6pbvoses",
"name": "ttg555501-010002316s1-supp2-3232591.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010002316s1-supp2-3232591.mp4",
"extension": "mp4",
"size": "12.1 MB",
"__typename": "WebExtraType"
},
{
"id": "1Jv6p2cBxVm",
"name": "ttg555501-010002316s1-supp1-3232591.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010002316s1-supp1-3232591.mp4",
"extension": "mp4",
"size": "7.6 MB",
"__typename": "WebExtraType"
},
{
"id": "1Jv6pju9hWU",
"name": "ttg555501-010002316s1-supp3-3232591.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010002316s1-supp3-3232591.mp4",
"extension": "mp4",
"size": "11.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JqCYw9nZde",
"doi": "10.1109/TVCG.2022.3230832",
"abstract": "A computational graph in a deep neural network (DNN) denotes a specific data flow diagram (DFD) composed of many tensors and operators. Existing toolkits for visualizing computational graphs are not applicable when the structure is highly complicated and large-scale (e.g., BERT [1]). To address this problem, we propose leveraging a suite of visual simplification techniques, including a cycle-removing method, a module-based edge-pruning algorithm, and an isomorphic subgraph stacking strategy. We design and implement an interactive visualization system that is suitable for computational graphs with up to 10 thousand elements. Experimental results and usage scenarios demonstrate that our tool reduces 60% elements on average and hence enhances the performance for recognizing and diagnosing DNN models. Our contributions are integrated into an open-source DNN visualization toolkit, namely, MindInsight [2].",
"abstracts": [
{
"abstractType": "Regular",
"content": "A computational graph in a deep neural network (DNN) denotes a specific data flow diagram (DFD) composed of many tensors and operators. Existing toolkits for visualizing computational graphs are not applicable when the structure is highly complicated and large-scale (e.g., BERT [1]). To address this problem, we propose leveraging a suite of visual simplification techniques, including a cycle-removing method, a module-based edge-pruning algorithm, and an isomorphic subgraph stacking strategy. We design and implement an interactive visualization system that is suitable for computational graphs with up to 10 thousand elements. Experimental results and usage scenarios demonstrate that our tool reduces 60% elements on average and hence enhances the performance for recognizing and diagnosing DNN models. Our contributions are integrated into an open-source DNN visualization toolkit, namely, MindInsight [2].",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A computational graph in a deep neural network (DNN) denotes a specific data flow diagram (DFD) composed of many tensors and operators. Existing toolkits for visualizing computational graphs are not applicable when the structure is highly complicated and large-scale (e.g., BERT [1]). To address this problem, we propose leveraging a suite of visual simplification techniques, including a cycle-removing method, a module-based edge-pruning algorithm, and an isomorphic subgraph stacking strategy. We design and implement an interactive visualization system that is suitable for computational graphs with up to 10 thousand elements. Experimental results and usage scenarios demonstrate that our tool reduces 60% elements on average and hence enhances the performance for recognizing and diagnosing DNN models. Our contributions are integrated into an open-source DNN visualization toolkit, namely, MindInsight [2].",
"title": "Towards Efficient Visual Simplification of Computational Graphs in Deep Neural Networks",
"normalizedTitle": "Towards Efficient Visual Simplification of Computational Graphs in Deep Neural Networks",
"fno": "09999322",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Computational Modeling",
"Tensors",
"Layout",
"Bit Error Rate",
"Graph Drawing",
"Computational Efficiency",
"Deep Neural Networks",
"Computational Graphs",
"Graph Visualization",
"Graph Layout",
"Visual Simplifications"
],
"authors": [
{
"givenName": "Rusheng",
"surname": "Pan",
"fullName": "Rusheng Pan",
"affiliation": "Stake key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhiyong",
"surname": "Wang",
"fullName": "Zhiyong Wang",
"affiliation": "Stake key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yating",
"surname": "Wei",
"fullName": "Yating Wei",
"affiliation": "Stake key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Han",
"surname": "Gao",
"fullName": "Han Gao",
"affiliation": "Distributed Data Lab, Huawei Technologies Co., Ltd., Shenzhen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gongchang",
"surname": "Ou",
"fullName": "Gongchang Ou",
"affiliation": "Distributed Data Lab, Huawei Technologies Co., Ltd., Shenzhen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Caleb Chen",
"surname": "Cao",
"fullName": "Caleb Chen Cao",
"affiliation": "Distributed Data Lab, Huawei Technologies Co., Ltd., Shenzhen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jingli",
"surname": "Xu",
"fullName": "Jingli Xu",
"affiliation": "Stake key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tong",
"surname": "Xu",
"fullName": "Tong Xu",
"affiliation": "Stake key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Chen",
"fullName": "Wei Chen",
"affiliation": "Stake key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-infovis/2004/8779/0/87790199",
"title": "Interactive Visualization of Small World Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2004/87790199/12OmNCgrDcT",
"parentPublication": {
"id": "proceedings/ieee-infovis/2004/8779/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1999/5897/0/58970013",
"title": "Image Graphs - A Novel Approach to Visual Data Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1999/58970013/12OmNwDSdkT",
"parentPublication": {
"id": "proceedings/ieee-vis/1999/5897/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2003/2055/0/20550008",
"title": "MoireGraphs: Radial Focus+Context Visualization and Interaction for Graphs with Visual Nodes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2003/20550008/12OmNxiKrV4",
"parentPublication": {
"id": "proceedings/ieee-infovis/2003/2055/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06812198",
"title": "Visual Adjacency Lists for Dynamic Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06812198/13rRUxcbnCs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2018/5520/0/552000b767",
"title": "Incremental Frequent Subgraph Mining on Large Evolving Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2018/552000b767/14Fq0WLXpnQ",
"parentPublication": {
"id": "proceedings/icde/2018/5520/0",
"title": "2018 IEEE 34th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2020/02/08546786",
"title": "Training Simplification and Model Simplification for Deep Learning : A Minimal Effort Back Propagation Method",
"doi": null,
"abstractUrl": "/journal/tk/2020/02/08546786/17D45We0UEP",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546841",
"title": "Graph Edit Distance Testing through Synthetic Graphs Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546841/17D45XacGhv",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2022/9221/0/922100a798",
"title": "EAGLE: Creating Equivalent Graphs to Test Deep Learning Libraries",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2022/922100a798/1EmrR9NmgrS",
"parentPublication": {
"id": "proceedings/icse/2022/9221/0",
"title": "2022 IEEE/ACM 44th International Conference on Software Engineering (ICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2021/5841/0/584100a506",
"title": "Splatter: An Efficient Sparse Image Convolution for Deep Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2021/584100a506/1EpL0zxLfNK",
"parentPublication": {
"id": "proceedings/csci/2021/5841/0",
"title": "2021 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/11/08732351",
"title": "Visual Genealogy of Deep Neural Networks",
"doi": null,
"abstractUrl": "/journal/tg/2020/11/08732351/1aDQt8709So",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09998319",
"articleId": "1JlF3cSB3wY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09999502",
"articleId": "1JrMCoX7Xpe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JU0215mAec",
"name": "ttg555501-09999322s1-supp1-3230832.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09999322s1-supp1-3230832.pdf",
"extension": "pdf",
"size": "90.5 kB",
"__typename": "WebExtraType"
},
{
"id": "1JU01TPJYc0",
"name": "ttg555501-09999322s1-supp2-3230832.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09999322s1-supp2-3230832.mp4",
"extension": "mp4",
"size": "47 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JrMCoX7Xpe",
"doi": "10.1109/TVCG.2022.3232367",
"abstract": "The large-scale motions in 3D turbulent channel flows, known as Turbulent Superstructures (TSS), play an essential role in the dynamics of small-scale structures within the turbulent boundary layer. However, as of today, there is no common agreement on the spatial and temporal relationships between these multiscale structures. We propose a novel space-time visualization technique for analyzing the temporal evolution of these multiscale structures in their spatial context and, thus, to further shed light on the conceptually different explanations of their dynamics. Since the temporal dynamics of TSS are believed to influence the structures in the turbulent boundary layer, we propose a combination of a 2D space-time velocity plot with an orthogonal 2D plot of projected 3D flow structures, which can interactively span the time and the space axis. Besides flow structures indicating the fluid motion, we propose showing the variations in derived fields as an additional source of explanation. The relationships between the structures in different spatial and temporal scales can be more effectively resolved by using various filtering operations and image registration algorithms. To reduce the information loss due to the non-injective nature of projection, spatial information is encoded into transparency or color. Since the proposed visualization is heavily demanding computational resources and memory bandwidth to stream unsteady flow fields and instantly compute derived 3D flow structures, the implementation exploits data compression, parallel computation capabilities, and high memory bandwidth on recent GPUs via the CUDA compute library.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The large-scale motions in 3D turbulent channel flows, known as Turbulent Superstructures (TSS), play an essential role in the dynamics of small-scale structures within the turbulent boundary layer. However, as of today, there is no common agreement on the spatial and temporal relationships between these multiscale structures. We propose a novel space-time visualization technique for analyzing the temporal evolution of these multiscale structures in their spatial context and, thus, to further shed light on the conceptually different explanations of their dynamics. Since the temporal dynamics of TSS are believed to influence the structures in the turbulent boundary layer, we propose a combination of a 2D space-time velocity plot with an orthogonal 2D plot of projected 3D flow structures, which can interactively span the time and the space axis. Besides flow structures indicating the fluid motion, we propose showing the variations in derived fields as an additional source of explanation. The relationships between the structures in different spatial and temporal scales can be more effectively resolved by using various filtering operations and image registration algorithms. To reduce the information loss due to the non-injective nature of projection, spatial information is encoded into transparency or color. Since the proposed visualization is heavily demanding computational resources and memory bandwidth to stream unsteady flow fields and instantly compute derived 3D flow structures, the implementation exploits data compression, parallel computation capabilities, and high memory bandwidth on recent GPUs via the CUDA compute library.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The large-scale motions in 3D turbulent channel flows, known as Turbulent Superstructures (TSS), play an essential role in the dynamics of small-scale structures within the turbulent boundary layer. However, as of today, there is no common agreement on the spatial and temporal relationships between these multiscale structures. We propose a novel space-time visualization technique for analyzing the temporal evolution of these multiscale structures in their spatial context and, thus, to further shed light on the conceptually different explanations of their dynamics. Since the temporal dynamics of TSS are believed to influence the structures in the turbulent boundary layer, we propose a combination of a 2D space-time velocity plot with an orthogonal 2D plot of projected 3D flow structures, which can interactively span the time and the space axis. Besides flow structures indicating the fluid motion, we propose showing the variations in derived fields as an additional source of explanation. The relationships between the structures in different spatial and temporal scales can be more effectively resolved by using various filtering operations and image registration algorithms. To reduce the information loss due to the non-injective nature of projection, spatial information is encoded into transparency or color. Since the proposed visualization is heavily demanding computational resources and memory bandwidth to stream unsteady flow fields and instantly compute derived 3D flow structures, the implementation exploits data compression, parallel computation capabilities, and high memory bandwidth on recent GPUs via the CUDA compute library.",
"title": "Spatio-Temporal Visual Analysis of Turbulent Superstructures in Unsteady Flow",
"normalizedTitle": "Spatio-Temporal Visual Analysis of Turbulent Superstructures in Unsteady Flow",
"fno": "09999502",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Visualization",
"Graphics Processing Units",
"Periodic Structures",
"Data Visualization",
"Feature Extraction",
"Bandwidth",
"Animation And Motion Related Techniques",
"Flow Visualization",
"Large Scale Data Techniques"
],
"authors": [
{
"givenName": "Behdad",
"surname": "Ghaffari",
"fullName": "Behdad Ghaffari",
"affiliation": "Technical University of Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Davide",
"surname": "Gatti",
"fullName": "Davide Gatti",
"affiliation": "Karlsruhe Institute of Technology, Karlsruhe, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rüdiger",
"surname": "Westermann",
"fullName": "Rüdiger Westermann",
"affiliation": "Technical University of Munich, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2015/6879/0/07156352",
"title": "Parallel unsteady flow line integral convolution for high-performance dense visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156352/12OmNAlvI6a",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1993/3940/0/00398850",
"title": "Visualization of turbulent flow with particles",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1993/00398850/12OmNAolGVS",
"parentPublication": {
"id": "proceedings/visual/1993/3940/0",
"title": "Proceedings Visualization '93",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmcs/1996/7438/0/00535011",
"title": "Spatio-temporal indexing for large multimedia applications",
"doi": null,
"abstractUrl": "/proceedings-article/mmcs/1996/00535011/12OmNBOllrt",
"parentPublication": {
"id": "proceedings/mmcs/1996/7438/0",
"title": "Proceedings of the Third IEEE International Conference on Multimedia Computing and Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dexa/2000/0680/0/06800881",
"title": "Efficient Index Structures for Spatio-Temporal Objects",
"doi": null,
"abstractUrl": "/proceedings-article/dexa/2000/06800881/12OmNCgrD2A",
"parentPublication": {
"id": "proceedings/dexa/2000/0680/0",
"title": "Proceedings 11th International Workshop on Database and Expert Systems Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/2001/7201/0/00964493",
"title": "Lagrangian-Eulerian advection for unsteady flow visualization",
"doi": null,
"abstractUrl": "/proceedings-article/visual/2001/00964493/12OmNqC2v4a",
"parentPublication": {
"id": "proceedings/visual/2001/7201/0",
"title": "Proceedings VIS 2001. Visualization 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567264",
"title": "Spatio-temporal vortices: properties, generation and recording",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567264/12OmNvAAtGx",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2002/1531/0/15310166",
"title": "Indexing Spatio-Temporal Data Warehouses",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2002/15310166/12OmNy5R3yL",
"parentPublication": {
"id": "proceedings/icde/2002/1531/0",
"title": "Proceedings 18th International Conference on Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2013/9999/0/06877494",
"title": "Petascale direct numerical simulation of turbulent channel flow on up to 786K cores",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2013/06877494/12OmNzdoN4R",
"parentPublication": {
"id": "proceedings/sc/2013/9999/0",
"title": "2013 SC - International Conference for High Performance Computing, Networking, Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1998/02/i0193",
"title": "Extracting 3D Vortices in Turbulent Fluid Flow",
"doi": null,
"abstractUrl": "/journal/tp/1998/02/i0193/13rRUxASuTT",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/02/v0113",
"title": "Accelerated Unsteady Flow Line Integral Convolution",
"doi": null,
"abstractUrl": "/journal/tg/2005/02/v0113/13rRUyuegh2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09999322",
"articleId": "1JqCYw9nZde",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10002316",
"articleId": "1JtvHc3BND2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JU018l3gOI",
"name": "ttg555501-09999502s1-supp1-3232367.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09999502s1-supp1-3232367.mp4",
"extension": "mp4",
"size": "218 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JlF32mS2SQ",
"doi": "10.1109/TVCG.2022.3231680",
"abstract": "Point clouds obtained from 3D scanners are often noisy and cannot be directly used for subsequent high-level tasks. In this paper, we propose a novel point cloud optimization method capable of denoising and homogenizing point clouds. Our idea is based on the assumption that the noise is generally much smaller than the effective signal. We perform noise perturbation on the noisy point cloud to get a new noisy point cloud, called self-variation point cloud. The noisy point cloud and self-variation point cloud have different noise distribution, but the same point cloud distribution. We compute the potential commonality between two noisy point clouds to obtain a clean point cloud. To implement our idea, we propose a <italic>Self-Variation Capture Network</italic> (SVCNet). We perturb the point cloud features in the latent space to obtain self-variation feature vectors, and capture the commonality between two noisy feature vectors through the feature aggregation and averaging. In addition, an edge constraint module is introduced to suppress low-pass effects during denoising. Our denoising method does not take into account the noise characteristics, and can filter the drift noise located on the underlying surface, resulting in a uniform distribution of the generated point cloud. The experimental results show that our algorithm outperforms the current state-of-the-art algorithms, especially in generating more uniform point clouds. In addition, extended experiments demonstrate the potential of our algorithm for point clouds upsampling.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Point clouds obtained from 3D scanners are often noisy and cannot be directly used for subsequent high-level tasks. In this paper, we propose a novel point cloud optimization method capable of denoising and homogenizing point clouds. Our idea is based on the assumption that the noise is generally much smaller than the effective signal. We perform noise perturbation on the noisy point cloud to get a new noisy point cloud, called self-variation point cloud. The noisy point cloud and self-variation point cloud have different noise distribution, but the same point cloud distribution. We compute the potential commonality between two noisy point clouds to obtain a clean point cloud. To implement our idea, we propose a <italic>Self-Variation Capture Network</italic> (SVCNet). We perturb the point cloud features in the latent space to obtain self-variation feature vectors, and capture the commonality between two noisy feature vectors through the feature aggregation and averaging. In addition, an edge constraint module is introduced to suppress low-pass effects during denoising. Our denoising method does not take into account the noise characteristics, and can filter the drift noise located on the underlying surface, resulting in a uniform distribution of the generated point cloud. The experimental results show that our algorithm outperforms the current state-of-the-art algorithms, especially in generating more uniform point clouds. In addition, extended experiments demonstrate the potential of our algorithm for point clouds upsampling.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Point clouds obtained from 3D scanners are often noisy and cannot be directly used for subsequent high-level tasks. In this paper, we propose a novel point cloud optimization method capable of denoising and homogenizing point clouds. Our idea is based on the assumption that the noise is generally much smaller than the effective signal. We perform noise perturbation on the noisy point cloud to get a new noisy point cloud, called self-variation point cloud. The noisy point cloud and self-variation point cloud have different noise distribution, but the same point cloud distribution. We compute the potential commonality between two noisy point clouds to obtain a clean point cloud. To implement our idea, we propose a Self-Variation Capture Network (SVCNet). We perturb the point cloud features in the latent space to obtain self-variation feature vectors, and capture the commonality between two noisy feature vectors through the feature aggregation and averaging. In addition, an edge constraint module is introduced to suppress low-pass effects during denoising. Our denoising method does not take into account the noise characteristics, and can filter the drift noise located on the underlying surface, resulting in a uniform distribution of the generated point cloud. The experimental results show that our algorithm outperforms the current state-of-the-art algorithms, especially in generating more uniform point clouds. In addition, extended experiments demonstrate the potential of our algorithm for point clouds upsampling.",
"title": "From Noise Addition to Denoising: A Self-Variation Capture Network for Point Cloud Optimization",
"normalizedTitle": "From Noise Addition to Denoising: A Self-Variation Capture Network for Point Cloud Optimization",
"fno": "09998112",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Point Cloud Compression",
"Noise Reduction",
"Noise Measurement",
"Manifolds",
"Optimization",
"Three Dimensional Displays",
"Surface Cleaning",
"Point Clouds Denoising",
"Self Variation",
"Noise Perturbation",
"Commonality Capture"
],
"authors": [
{
"givenName": "Tianming",
"surname": "Zhao",
"fullName": "Tianming Zhao",
"affiliation": "School of Artificial the Intelligence and Automation, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peng",
"surname": "Gao",
"fullName": "Peng Gao",
"affiliation": "School of Artificial the Intelligence and Automation, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tian",
"surname": "Tian",
"fullName": "Tian Tian",
"affiliation": "School of Artificial the Intelligence and Automation, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiayi",
"surname": "Ma",
"fullName": "Jiayi Ma",
"affiliation": "Electronic Information School, Wuhan University, Wuhan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jinwen",
"surname": "Tian",
"fullName": "Jinwen Tian",
"affiliation": "School of Artificial the Intelligence and Automation, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2017/0560/0/08026263",
"title": "Subjective and objective quality evaluation of 3D point cloud denoising algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2017/08026263/12OmNBvkdns",
"parentPublication": {
"id": "proceedings/icmew/2017/0560/0",
"title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2013/4893/0/06455215",
"title": "Two-stage Point-sampled Model Denoising by Robust Ellipsoid Criterion and Mean Shift",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2013/06455215/12OmNrkjVhD",
"parentPublication": {
"id": "proceedings/isdea/2013/4893/0",
"title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2018/5321/0/08499090",
"title": "Graph-Based Point Cloud Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2018/08499090/17D45VTRoAv",
"parentPublication": {
"id": "proceedings/bigmm/2018/5321/0",
"title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a444",
"title": "Structured Low-Rank Matrix Factorization for Point-Cloud Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a444/17D45XacGiJ",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200e563",
"title": "Score-Based Point Cloud Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200e563/1BmFFYz8vXa",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/03/09775211",
"title": "Deep Point Set Resampling via Gradient Fields",
"doi": null,
"abstractUrl": "/journal/tp/2023/03/09775211/1Dqh2PmIooM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/11/08730533",
"title": "Multi-Patch Collaborative Point Cloud Denoising via Low-Rank Recovery with Graph Constraint",
"doi": null,
"abstractUrl": "/journal/tg/2020/11/08730533/1aAxaVT7HtS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/08933110",
"title": "Anisotropic Denoising of 3D Point Clouds by Aggregation of Multiple Surface-Adaptive Estimates",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/08933110/1gKvciBzOfu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300a052",
"title": "Total Denoising: Unsupervised Learning of 3D Point Cloud Cleaning",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300a052/1hVlvo8kleE",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a123",
"title": "Point Cloud Denoising Algorithm Based on Noise Classification",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a123/1p1gtJQEUPC",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09993762",
"articleId": "1Jgw2iruIyk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09998319",
"articleId": "1JlF3cSB3wY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JqCYjFT3Lq",
"name": "ttg555501-09998112s1-supp1-3231680.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09998112s1-supp1-3231680.pdf",
"extension": "pdf",
"size": "2.09 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JlF3cSB3wY",
"doi": "10.1109/TVCG.2022.3231716",
"abstract": "Leaving the context of visualizations invisible can have negative impacts on understanding and transparency. While common wisdom suggests that recontextualizing visualizations with metadata (e.g., disclosing the data source or instructions for decoding the visualizations' encoding) may counter these effects, the impact remains largely unknown. To fill this gap, we conducted two experiments. In Experiment 1, we explored how chart type, topic, and user goal impacted which categories of metadata participants deemed most relevant. We presented 64 participants with four real-world visualizations. For each visualization, participants were given four goals and selected the type of metadata they most wanted from a set of 18 types. Our results indicated that participants were most interested in metadata which explained the visualization's encoding for goals related to understanding and metadata about the source of the data for assessing trustworthiness. In Experiment 2, we explored how these two types of metadata impact transparency, trustworthiness and persuasiveness, information relevance, and understanding. We asked 144 participants to explain the main message of two pairs of visualizations (one with metadata and one without); rate them on scales of transparency and relevance; and then predict the likelihood that they were selected for a presentation to policymakers. Our results suggested that visualizations with metadata were perceived as more thorough than those without metadata, but similarly relevant, accurate, clear, and complete. Additionally, we found that metadata did not impact the accuracy of the information extracted from visualizations, but may have influenced which information participants remembered as important or interesting.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Leaving the context of visualizations invisible can have negative impacts on understanding and transparency. While common wisdom suggests that recontextualizing visualizations with metadata (e.g., disclosing the data source or instructions for decoding the visualizations' encoding) may counter these effects, the impact remains largely unknown. To fill this gap, we conducted two experiments. In Experiment 1, we explored how chart type, topic, and user goal impacted which categories of metadata participants deemed most relevant. We presented 64 participants with four real-world visualizations. For each visualization, participants were given four goals and selected the type of metadata they most wanted from a set of 18 types. Our results indicated that participants were most interested in metadata which explained the visualization's encoding for goals related to understanding and metadata about the source of the data for assessing trustworthiness. In Experiment 2, we explored how these two types of metadata impact transparency, trustworthiness and persuasiveness, information relevance, and understanding. We asked 144 participants to explain the main message of two pairs of visualizations (one with metadata and one without); rate them on scales of transparency and relevance; and then predict the likelihood that they were selected for a presentation to policymakers. Our results suggested that visualizations with metadata were perceived as more thorough than those without metadata, but similarly relevant, accurate, clear, and complete. Additionally, we found that metadata did not impact the accuracy of the information extracted from visualizations, but may have influenced which information participants remembered as important or interesting.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Leaving the context of visualizations invisible can have negative impacts on understanding and transparency. While common wisdom suggests that recontextualizing visualizations with metadata (e.g., disclosing the data source or instructions for decoding the visualizations' encoding) may counter these effects, the impact remains largely unknown. To fill this gap, we conducted two experiments. In Experiment 1, we explored how chart type, topic, and user goal impacted which categories of metadata participants deemed most relevant. We presented 64 participants with four real-world visualizations. For each visualization, participants were given four goals and selected the type of metadata they most wanted from a set of 18 types. Our results indicated that participants were most interested in metadata which explained the visualization's encoding for goals related to understanding and metadata about the source of the data for assessing trustworthiness. In Experiment 2, we explored how these two types of metadata impact transparency, trustworthiness and persuasiveness, information relevance, and understanding. We asked 144 participants to explain the main message of two pairs of visualizations (one with metadata and one without); rate them on scales of transparency and relevance; and then predict the likelihood that they were selected for a presentation to policymakers. Our results suggested that visualizations with metadata were perceived as more thorough than those without metadata, but similarly relevant, accurate, clear, and complete. Additionally, we found that metadata did not impact the accuracy of the information extracted from visualizations, but may have influenced which information participants remembered as important or interesting.",
"title": "From Invisible to Visible: Impacts of Metadata in Communicative Data Visualization",
"normalizedTitle": "From Invisible to Visible: Impacts of Metadata in Communicative Data Visualization",
"fno": "09998319",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Metadata",
"Encoding",
"Soft Sensors",
"Stakeholders",
"Organizations",
"Data Mining",
"Visualization",
"Metadata",
"Understanding",
"Transparency",
"Trust"
],
"authors": [
{
"givenName": "Alyxander",
"surname": "Burns",
"fullName": "Alyxander Burns",
"affiliation": "Mount Holyoke College, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christiana",
"surname": "Lee",
"fullName": "Christiana Lee",
"affiliation": "University of Massachusetts Amherst, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thai",
"surname": "On",
"fullName": "Thai On",
"affiliation": "University of Massachusetts Amherst, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Cindy",
"surname": "Xiong",
"fullName": "Cindy Xiong",
"affiliation": "University of Massachusetts Amherst, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Evan",
"surname": "Peck",
"fullName": "Evan Peck",
"affiliation": "Bucknell University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Narges",
"surname": "Mahyar",
"fullName": "Narges Mahyar",
"affiliation": "University of Massachusetts Amherst, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iri/2018/2659/0/265901a452",
"title": "Universal Metadata Repository: Integrating Data Profiles Across an Organization",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2018/265901a452/12OmNBqMDlR",
"parentPublication": {
"id": "proceedings/iri/2018/2659/0",
"title": "2018 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2016/0662/0/0662a071",
"title": "Towards Cleaning-Up Open Data Portals: A Metadata Reconciliation Approach",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2016/0662a071/12OmNwkR5CP",
"parentPublication": {
"id": "proceedings/icsc/2016/0662/0",
"title": "2016 IEEE Tenth International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssdbm/1998/8575/0/85750216",
"title": "Integrated Metadata-Systems within Statistical Offices",
"doi": null,
"abstractUrl": "/proceedings-article/ssdbm/1998/85750216/12OmNxQOjCq",
"parentPublication": {
"id": "proceedings/ssdbm/1998/8575/0",
"title": "Scientific and Statistical Database Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esiat/2009/3682/2/3682b152",
"title": "Development of Municipal Infrastructure Metadata System and Standard",
"doi": null,
"abstractUrl": "/proceedings-article/esiat/2009/3682b152/12OmNxisR0o",
"parentPublication": {
"id": "esiat/2009/3682/2",
"title": "Environmental Science and Information Application Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2018/12/08370078",
"title": "A Flattened Metadata Service for Distributed File Systems",
"doi": null,
"abstractUrl": "/journal/td/2018/12/08370078/17D45WB0qax",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2017/5114/0/09926246",
"title": "LocoFS: A Loosely-Coupled Metadata Service for Distributed File Systems",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2017/09926246/1HOxzXnsb5K",
"parentPublication": {
"id": "proceedings/sc/2017/5114/0",
"title": "SC17: International Conference for High Performance Computing, Networking, Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2022/6124/0/612400a423",
"title": "Cluster Analysis of Open Research Data and a Case for Replication Metadata",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2022/612400a423/1J6hxvSIVGg",
"parentPublication": {
"id": "proceedings/e-science/2022/6124/0",
"title": "2022 IEEE 18th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2023/8263/0/826300a312",
"title": "What metadata is needed for semantic and data mappings?",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2023/826300a312/1LFKUsjOliM",
"parentPublication": {
"id": "proceedings/icsc/2023/8263/0",
"title": "2023 IEEE 17th International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2022/11/09376935",
"title": "Practical Temporal Prefetching With Compressed On-Chip Metadata",
"doi": null,
"abstractUrl": "/journal/tc/2022/11/09376935/1rUNmLJLZPW",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis4good/2021/1366/0/136600a011",
"title": "Making the Invisible Visible: Risks and Benefits of Disclosing Metadata in Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vis4good/2021/136600a011/1yNiRLiy36g",
"parentPublication": {
"id": "proceedings/vis4good/2021/1366/0",
"title": "2021 IEEE Workshop on Visualization for Social Good (VIS4Good)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09998112",
"articleId": "1JlF32mS2SQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09999322",
"articleId": "1JqCYw9nZde",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Jgw1XkmIEM",
"doi": "10.1109/TVCG.2022.3230636",
"abstract": "High-performance computing (HPC) systems play a critical role in facilitating scientific discoveries. Their scale and complexity (e.g., the number of computational units and software stack) continue to grow as new systems are expected to process increasingly more data and reduce computing time. However, with more processing elements, the probability that these systems will experience a random bit-flip error that corrupts a program's output also increases, which is often recognized as silent data corruption. Analyzing the resiliency of HPC applications in extreme-scale computing to silent data corruption is crucial but difficult. An HPC application often contains a large number of computation units that need to be tested, and error propagation caused by error corruption is complex and difficult to interpret. To accommodate this challenge, we propose an interactive visualization system that helps HPC researchers understand the resiliency of HPC applications and compare their error propagation. Our system models an application's error propagation to study a program's resiliency by constructing and visualizing its fault tolerance boundary. Coordinating with multiple interactive designs, our system enables domain experts to efficiently explore the complicated spatial and temporal correlation between error propagations. At the end, the system integrated a nonmonotonic error propagation analysis with an adjustable graph propagation visualization to help domain experts examine the details of error propagation and answer such questions as why an error is mitigated or amplified by program execution.",
"abstracts": [
{
"abstractType": "Regular",
"content": "High-performance computing (HPC) systems play a critical role in facilitating scientific discoveries. Their scale and complexity (e.g., the number of computational units and software stack) continue to grow as new systems are expected to process increasingly more data and reduce computing time. However, with more processing elements, the probability that these systems will experience a random bit-flip error that corrupts a program's output also increases, which is often recognized as silent data corruption. Analyzing the resiliency of HPC applications in extreme-scale computing to silent data corruption is crucial but difficult. An HPC application often contains a large number of computation units that need to be tested, and error propagation caused by error corruption is complex and difficult to interpret. To accommodate this challenge, we propose an interactive visualization system that helps HPC researchers understand the resiliency of HPC applications and compare their error propagation. Our system models an application's error propagation to study a program's resiliency by constructing and visualizing its fault tolerance boundary. Coordinating with multiple interactive designs, our system enables domain experts to efficiently explore the complicated spatial and temporal correlation between error propagations. At the end, the system integrated a nonmonotonic error propagation analysis with an adjustable graph propagation visualization to help domain experts examine the details of error propagation and answer such questions as why an error is mitigated or amplified by program execution.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "High-performance computing (HPC) systems play a critical role in facilitating scientific discoveries. Their scale and complexity (e.g., the number of computational units and software stack) continue to grow as new systems are expected to process increasingly more data and reduce computing time. However, with more processing elements, the probability that these systems will experience a random bit-flip error that corrupts a program's output also increases, which is often recognized as silent data corruption. Analyzing the resiliency of HPC applications in extreme-scale computing to silent data corruption is crucial but difficult. An HPC application often contains a large number of computation units that need to be tested, and error propagation caused by error corruption is complex and difficult to interpret. To accommodate this challenge, we propose an interactive visualization system that helps HPC researchers understand the resiliency of HPC applications and compare their error propagation. Our system models an application's error propagation to study a program's resiliency by constructing and visualizing its fault tolerance boundary. Coordinating with multiple interactive designs, our system enables domain experts to efficiently explore the complicated spatial and temporal correlation between error propagations. At the end, the system integrated a nonmonotonic error propagation analysis with an adjustable graph propagation visualization to help domain experts examine the details of error propagation and answer such questions as why an error is mitigated or amplified by program execution.",
"title": "A Visual Comparison of Silent Error Propagation",
"normalizedTitle": "A Visual Comparison of Silent Error Propagation",
"fno": "09993758",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Resilience",
"Time Series Analysis",
"Visualization",
"Task Analysis",
"Fault Tolerant Systems",
"Fault Tolerance",
"Error Propagation",
"Fault Tolerance Boundary",
"Graph Visualization",
"Information Visualization",
"Silent Data Corruption"
],
"authors": [
{
"givenName": "Zhimin",
"surname": "Li",
"fullName": "Zhimin Li",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Harshitha",
"surname": "Menon",
"fullName": "Harshitha Menon",
"affiliation": "Lawrence Livermore National Laboratory",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kathryn",
"surname": "Mohror",
"fullName": "Kathryn Mohror",
"affiliation": "Lawrence Livermore National Laboratory",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shusen",
"surname": "Liu",
"fullName": "Shusen Liu",
"affiliation": "Lawrence Livermore National Laboratory",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Luanzheng",
"surname": "Guo",
"fullName": "Luanzheng Guo",
"affiliation": "Pacific Northwest National Laboratory",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peer-Timo",
"surname": "Bremer",
"fullName": "Peer-Timo Bremer",
"affiliation": "Lawrence Livermore National Laboratory",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Valerio",
"surname": "Pascucci",
"fullName": "Valerio Pascucci",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/sc/2015/3723/0/2807670",
"title": "Understanding the propagation of transient errors in HPC applications",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2015/2807670/12OmNBUAvXC",
"parentPublication": {
"id": "proceedings/sc/2015/3723/0",
"title": "SC15: International Conference for High-Performance Computing, Networking, Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2011/4385/0/4385a287",
"title": "Hauberk: Lightweight Silent Data Corruption Error Detector for GPGPU",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2011/4385a287/12OmNrNh0wM",
"parentPublication": {
"id": "proceedings/ipdps/2011/4385/0",
"title": "Parallel and Distributed Processing Symposium, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsn-w/2017/2272/0/2272a153",
"title": "Modeling Error Propagation in Programs",
"doi": null,
"abstractUrl": "/proceedings-article/dsn-w/2017/2272a153/12OmNwIHoty",
"parentPublication": {
"id": "proceedings/dsn-w/2017/2272/0",
"title": "2017 47th Annual IEEE/IFIP International Conference on Dependable Systems and Networks Workshop (DSN-W)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cluster/2015/6598/0/6598a541",
"title": "Understanding the Propagation of Error Due to a Silent Data Corruption in a Sparse Matrix Vector Multiply",
"doi": null,
"abstractUrl": "/proceedings-article/cluster/2015/6598a541/12OmNwdL7qN",
"parentPublication": {
"id": "proceedings/cluster/2015/6598/0",
"title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2018/8384/0/838400a094",
"title": "FlipTracker: Understanding Natural Error Resilience in HPC Applications",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2018/838400a094/17D45Xq6dCQ",
"parentPublication": {
"id": "proceedings/sc/2018/8384/0",
"title": "2018 SC18: The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2022/5444/0/544400a219",
"title": "Mitigating Silent Data Corruptions in HPC Applications across Multiple Program Inputs",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2022/544400a219/1I0bSPUDeRa",
"parentPublication": {
"id": "proceedings/sc/2022/5444/0/",
"title": "SC22: International Conference for High Performance Computing, Networking, Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2022/6297/0/09927870",
"title": "Paralellism-Based Techniques for Slowing Down Soft Error Propagation",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2022/09927870/1J4CiZhvtLO",
"parentPublication": {
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2022/6297/0",
"title": "2022 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2022/5444/0/544400a219",
"title": "Mitigating Silent Data Corruptions in HPC Applications across Multiple Program Inputs",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2022/544400a219/1L07kBWvdpS",
"parentPublication": {
"id": "proceedings/sc/2022/5444/0/",
"title": "SC22: International Conference for High Performance Computing, Networking, Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/10/09094379",
"title": "SpotSDC: Revealing the Silent Data Corruption Propagation in High-Performance Computing Systems",
"doi": null,
"abstractUrl": "/journal/tg/2021/10/09094379/1jQNs0xudBS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ftxs/2020/1080/0/108000a001",
"title": "Improving Scalability of Silent-Error Resilience for Message-Passing Solvers via Local Recovery and Asynchrony",
"doi": null,
"abstractUrl": "/proceedings-article/ftxs/2020/108000a001/1pZ12PsdHTa",
"parentPublication": {
"id": "proceedings/ftxs/2020/1080/0",
"title": "2020 IEEE/ACM 10th Workshop on Fault Tolerance for HPC at eXtreme Scale (FTXS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09993764",
"articleId": "1Jgw0XrwLsI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09993762",
"articleId": "1Jgw2iruIyk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JilXEyh5kY",
"name": "ttg555501-09993758s1-supp2-3230636.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09993758s1-supp2-3230636.mp4",
"extension": "mp4",
"size": "12 MB",
"__typename": "WebExtraType"
},
{
"id": "1JilXIXCNB6",
"name": "ttg555501-09993758s1-supp1-3230636.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09993758s1-supp1-3230636.pdf",
"extension": "pdf",
"size": "754 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JgvW1I7Gko",
"doi": "10.1109/TVCG.2022.3230739",
"abstract": "How to create an efficient and accurate interactive tool for triangular mesh clipping is one of the key problems to be solved in computer-assisted surgical planning. Although the existing algorithms can realize three-dimensional model clipping, problems still remain unsolved regarding the flexibility of clipping paths and the capping of clipped cross-sections. In this study, we propose a mesh clipping algorithm for surgical planning based on polygonal convex partitioning. Firstly, two-dimensional polygonal regions are extended to three-dimensional clipping paths generated from selected reference points. Secondly, the convex regions are partitioned with a recursive algorithm to obtain the clipped and residual models with closed surfaces. Finally, surgical planning software with the function of mesh clipping has been developed, which is capable to create complex clipping paths by normal vector adjustment and thickness control. The robustness and efficiency of our algorithm have been demonstrated by surgical planning of craniomaxillofacial osteotomy, pelvis tumor resection and cranial vault remodeling.",
"abstracts": [
{
"abstractType": "Regular",
"content": "How to create an efficient and accurate interactive tool for triangular mesh clipping is one of the key problems to be solved in computer-assisted surgical planning. Although the existing algorithms can realize three-dimensional model clipping, problems still remain unsolved regarding the flexibility of clipping paths and the capping of clipped cross-sections. In this study, we propose a mesh clipping algorithm for surgical planning based on polygonal convex partitioning. Firstly, two-dimensional polygonal regions are extended to three-dimensional clipping paths generated from selected reference points. Secondly, the convex regions are partitioned with a recursive algorithm to obtain the clipped and residual models with closed surfaces. Finally, surgical planning software with the function of mesh clipping has been developed, which is capable to create complex clipping paths by normal vector adjustment and thickness control. The robustness and efficiency of our algorithm have been demonstrated by surgical planning of craniomaxillofacial osteotomy, pelvis tumor resection and cranial vault remodeling.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "How to create an efficient and accurate interactive tool for triangular mesh clipping is one of the key problems to be solved in computer-assisted surgical planning. Although the existing algorithms can realize three-dimensional model clipping, problems still remain unsolved regarding the flexibility of clipping paths and the capping of clipped cross-sections. In this study, we propose a mesh clipping algorithm for surgical planning based on polygonal convex partitioning. Firstly, two-dimensional polygonal regions are extended to three-dimensional clipping paths generated from selected reference points. Secondly, the convex regions are partitioned with a recursive algorithm to obtain the clipped and residual models with closed surfaces. Finally, surgical planning software with the function of mesh clipping has been developed, which is capable to create complex clipping paths by normal vector adjustment and thickness control. The robustness and efficiency of our algorithm have been demonstrated by surgical planning of craniomaxillofacial osteotomy, pelvis tumor resection and cranial vault remodeling.",
"title": "3D Surface-Closed Mesh Clipping Based on Polygonal Partitioning for Surgical Planning",
"normalizedTitle": "3D Surface-Closed Mesh Clipping Based on Polygonal Partitioning for Surgical Planning",
"fno": "09993760",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Surgery",
"Planning",
"Partitioning Algorithms",
"Computational Modeling",
"Three Dimensional Displays",
"Software Algorithms",
"Software",
"Model Clipping",
"Partitioning",
"Surgical Planning",
"Triangular Mesh"
],
"authors": [
{
"givenName": "Mingjun",
"surname": "Gong",
"fullName": "Mingjun Gong",
"affiliation": "Institute of Biomedical Manufacturing and Life Quality Engineering, School of Mechanical Engineering, Shanghai Jiao Tong University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaojun",
"surname": "Chen",
"fullName": "Xiaojun Chen",
"affiliation": "School of Mechanical Engineering",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/2015/6775/0/6775a029",
"title": "Use of 3D Printing in Surgical Planning: Strategies for Risk Analysis and User Involvement",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2015/6775a029/12OmNApLGxV",
"parentPublication": {
"id": "proceedings/cbms/2015/6775/0",
"title": "2015 IEEE 28th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1996/3673/0/36730021",
"title": "Anatomy-Based Facial Tissue Modeling Using the Finite Element Method",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1996/36730021/12OmNC2fGrv",
"parentPublication": {
"id": "proceedings/ieee-vis/1996/3673/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/miar/2001/1113/0/11130021",
"title": "Interactive Surgical Planning Using Context Based Volume Visualization Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/miar/2001/11130021/12OmNqBtiRo",
"parentPublication": {
"id": "proceedings/miar/2001/1113/0",
"title": "Medical Imaging and Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpp/2014/5618/0/5618a241",
"title": "Output-Sensitive Parallel Algorithm for Polygon Clipping",
"doi": null,
"abstractUrl": "/proceedings-article/icpp/2014/5618a241/12OmNx3ZjaB",
"parentPublication": {
"id": "proceedings/icpp/2014/5618/0",
"title": "2014 43nd International Conference on Parallel Processing (ICPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a269",
"title": "A Mixed-Reality System for Breast Surgical Planning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a269/12OmNyaXPPl",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2015/7204/0/7204a176",
"title": "A Collaborative and Immersive VR Simulator for Education and Assessment of Surgical Teams",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2015/7204a176/12OmNzcxZ6o",
"parentPublication": {
"id": "proceedings/svr/2015/7204/0",
"title": "2015 XVII Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1996/01/mcg1996010046",
"title": "Assessing Craniofacial Surgical Simulation",
"doi": null,
"abstractUrl": "/magazine/cg/1996/01/mcg1996010046/13rRUy0ZzUT",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539313",
"title": "PelVis: Atlas-based Surgical Planning for Oncological Pelvic Surgery",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539313/13rRUygT7n1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a410",
"title": "Towards Virtual Teaching Hospitals for Advanced Surgical Training",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a410/1CJetJYrF3q",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090485",
"title": "Pipeline of anatomical models generation. Experience of surgical planning and medical personal training by means of virtual environments and physical prototyping",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090485/1jIxqx9VfS8",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09992129",
"articleId": "1JevCrH10vS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09993764",
"articleId": "1Jgw0XrwLsI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JilVzyIRxe",
"name": "ttg555501-09993760s1-supp1-3230739.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09993760s1-supp1-3230739.mp4",
"extension": "mp4",
"size": "16.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Jgw2iruIyk",
"doi": "10.1109/TVCG.2022.3230855",
"abstract": "Videos are an accessible form of media for analyzing sports postures and providing feedback to athletes. Existing sport-specific systems embed bespoke human pose attributes and thus can be hard to scale for new attributes, especially for users without programming experiences. Some systems retain scalability by directly showing the differences between two poses, but they might not clearly visualize the key differences that viewers would like to pursue. Besides, video-based coaching systems often present feedback on the correctness of poses by augmenting videos with visual markers or reference poses. However, previewing and augmenting videos limit the analysis and visualization of human poses due to the fixed viewpoints in videos, which confine the observation of captured human movements and cause ambiguity in the augmented feedback. To address these issues, we study customizable human pose data analysis and visualization in the context of running pose attributes, such as joint angles and step distances. Based on existing literature and a formative study, we have designed and implemented a system, <italic>PoseCoach</italic>, to provide feedback on running poses for amateurs by comparing the running poses between a novice and an expert. <italic>PoseCoach</italic> adopts a customizable data analysis model to allow users' controllability in defining pose attributes of their interests through our interface. To avoid the influence of viewpoint differences and provide intuitive feedback, <italic>PoseCoach</italic> visualizes the pose differences as part-based 3D animations on a human model to imitate the demonstration of a human coach. We conduct a user study to verify our design components and conduct expert interviews to evaluate the usefulness of the system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Videos are an accessible form of media for analyzing sports postures and providing feedback to athletes. Existing sport-specific systems embed bespoke human pose attributes and thus can be hard to scale for new attributes, especially for users without programming experiences. Some systems retain scalability by directly showing the differences between two poses, but they might not clearly visualize the key differences that viewers would like to pursue. Besides, video-based coaching systems often present feedback on the correctness of poses by augmenting videos with visual markers or reference poses. However, previewing and augmenting videos limit the analysis and visualization of human poses due to the fixed viewpoints in videos, which confine the observation of captured human movements and cause ambiguity in the augmented feedback. To address these issues, we study customizable human pose data analysis and visualization in the context of running pose attributes, such as joint angles and step distances. Based on existing literature and a formative study, we have designed and implemented a system, <italic>PoseCoach</italic>, to provide feedback on running poses for amateurs by comparing the running poses between a novice and an expert. <italic>PoseCoach</italic> adopts a customizable data analysis model to allow users' controllability in defining pose attributes of their interests through our interface. To avoid the influence of viewpoint differences and provide intuitive feedback, <italic>PoseCoach</italic> visualizes the pose differences as part-based 3D animations on a human model to imitate the demonstration of a human coach. We conduct a user study to verify our design components and conduct expert interviews to evaluate the usefulness of the system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Videos are an accessible form of media for analyzing sports postures and providing feedback to athletes. Existing sport-specific systems embed bespoke human pose attributes and thus can be hard to scale for new attributes, especially for users without programming experiences. Some systems retain scalability by directly showing the differences between two poses, but they might not clearly visualize the key differences that viewers would like to pursue. Besides, video-based coaching systems often present feedback on the correctness of poses by augmenting videos with visual markers or reference poses. However, previewing and augmenting videos limit the analysis and visualization of human poses due to the fixed viewpoints in videos, which confine the observation of captured human movements and cause ambiguity in the augmented feedback. To address these issues, we study customizable human pose data analysis and visualization in the context of running pose attributes, such as joint angles and step distances. Based on existing literature and a formative study, we have designed and implemented a system, PoseCoach, to provide feedback on running poses for amateurs by comparing the running poses between a novice and an expert. PoseCoach adopts a customizable data analysis model to allow users' controllability in defining pose attributes of their interests through our interface. To avoid the influence of viewpoint differences and provide intuitive feedback, PoseCoach visualizes the pose differences as part-based 3D animations on a human model to imitate the demonstration of a human coach. We conduct a user study to verify our design components and conduct expert interviews to evaluate the usefulness of the system.",
"title": "PoseCoach: A Customizable Analysis and Visualization System for Video-based Running Coaching",
"normalizedTitle": "PoseCoach: A Customizable Analysis and Visualization System for Video-based Running Coaching",
"fno": "09993762",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Videos",
"Sports",
"Three Dimensional Displays",
"Data Visualization",
"Data Analysis",
"Analytical Models",
"Solid Modeling",
"Human Pose",
"Video Processing",
"Sports Data Analysis"
],
"authors": [
{
"givenName": "Jingyuan",
"surname": "Liu",
"fullName": "Jingyuan Liu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nazmus",
"surname": "Saquib",
"fullName": "Nazmus Saquib",
"affiliation": "Tero Labs, California, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhutian",
"surname": "Chen",
"fullName": "Zhutian Chen",
"affiliation": "Harvard University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rubaiat Habib",
"surname": "Kazi",
"fullName": "Rubaiat Habib Kazi",
"affiliation": "Adobe Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Li-Yi",
"surname": "Wei",
"fullName": "Li-Yi Wei",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongbo",
"surname": "Fu",
"fullName": "Hongbo Fu",
"affiliation": "City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chiew-Lan",
"surname": "Tai",
"fullName": "Chiew-Lan Tai",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851b506",
"title": "3D Action Recognition from Novel Viewpoints",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851b506/12OmNwt5slt",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a163",
"title": "Efficient Multi-person Hierarchical 3D Pose Estimation for Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a163/19wB22E8lb2",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ai/5555/01/09745764",
"title": "Deep reconstruction of 3D human poses from video",
"doi": null,
"abstractUrl": "/journal/ai/5555/01/09745764/1CbVpC8IS8o",
"parentPublication": {
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09888037",
"title": "MPS-NeRF: Generalizable 3D Human Rendering From Multiview Images",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09888037/1GBRkqcf7m8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09982410",
"title": "PoseBERT: A Generic Transformer Module for Temporal 3D Human Modeling",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09982410/1J2T4N6o4Rq",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10050391",
"title": "Learning to Augment Poses for 3D Human Pose Estimation in Images and Videos",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10050391/1KYofZaXCTK",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/03/09174911",
"title": "Locally Connected Network for Monocular 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2022/03/09174911/1myqF3drJ1S",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413297",
"title": "The Role of Cycle Consistency for Generating Better Human Action Videos from a Single Frame",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413297/1tmiBIzqMSc",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900i571",
"title": "PoseAug: A Differentiable Pose Augmentation Framework for 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900i571/1yeKofrFGZa",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09613748",
"title": "VPN++: Rethinking Video-Pose Embeddings for Understanding Activities of Daily Living",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09613748/1ythZkR4guc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09993758",
"articleId": "1Jgw1XkmIEM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09998112",
"articleId": "1JlF32mS2SQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JilYapk2nS",
"name": "ttg555501-09993762s1-supp1-3230855.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09993762s1-supp1-3230855.mp4",
"extension": "mp4",
"size": "65.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Jgw0XrwLsI",
"doi": "10.1109/TVCG.2022.3230746",
"abstract": "Kinesthetic feedback, the feeling of restriction or resistance when hands contact objects, is essential for natural freehand interaction in VR. However, inducing kinesthetic feedback using mechanical hardware can be cumbersome and hard to control in commodity VR systems. We propose the <italic>kine-appendage</italic> concept to compensate for the loss of kinesthetic feedback in virtual environments, i.e., a virtual appendage is added to the user's avatar hand; when the appendage contacts a virtual object, it exhibits transformations (rotation and deformation); when it disengages from the contact, it recovers its original appearance. A proof-of-concept <italic>kine-appendage</italic> technique, <italic>BrittleStylus</italic>, was designed to enhance isomorphic typing. Our empirical evaluations demonstrated that (i) BrittleStylus significantly reduced the uncorrected error rate of naive isomorphic typing from 6.53% to 1.92% without compromising the typing speed; (ii) BrittleStylus could induce the sense of kinesthetic feedback, the degree of which was parity with that induced by pseudo-haptic (+ visual cue) methods; and (iii) participants preferred BrittleStylus over pseudo-haptic (+ visual cue) methods because of not only good performance but also fluent hand movements.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Kinesthetic feedback, the feeling of restriction or resistance when hands contact objects, is essential for natural freehand interaction in VR. However, inducing kinesthetic feedback using mechanical hardware can be cumbersome and hard to control in commodity VR systems. We propose the <italic>kine-appendage</italic> concept to compensate for the loss of kinesthetic feedback in virtual environments, i.e., a virtual appendage is added to the user's avatar hand; when the appendage contacts a virtual object, it exhibits transformations (rotation and deformation); when it disengages from the contact, it recovers its original appearance. A proof-of-concept <italic>kine-appendage</italic> technique, <italic>BrittleStylus</italic>, was designed to enhance isomorphic typing. Our empirical evaluations demonstrated that (i) BrittleStylus significantly reduced the uncorrected error rate of naive isomorphic typing from 6.53% to 1.92% without compromising the typing speed; (ii) BrittleStylus could induce the sense of kinesthetic feedback, the degree of which was parity with that induced by pseudo-haptic (+ visual cue) methods; and (iii) participants preferred BrittleStylus over pseudo-haptic (+ visual cue) methods because of not only good performance but also fluent hand movements.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Kinesthetic feedback, the feeling of restriction or resistance when hands contact objects, is essential for natural freehand interaction in VR. However, inducing kinesthetic feedback using mechanical hardware can be cumbersome and hard to control in commodity VR systems. We propose the kine-appendage concept to compensate for the loss of kinesthetic feedback in virtual environments, i.e., a virtual appendage is added to the user's avatar hand; when the appendage contacts a virtual object, it exhibits transformations (rotation and deformation); when it disengages from the contact, it recovers its original appearance. A proof-of-concept kine-appendage technique, BrittleStylus, was designed to enhance isomorphic typing. Our empirical evaluations demonstrated that (i) BrittleStylus significantly reduced the uncorrected error rate of naive isomorphic typing from 6.53% to 1.92% without compromising the typing speed; (ii) BrittleStylus could induce the sense of kinesthetic feedback, the degree of which was parity with that induced by pseudo-haptic (+ visual cue) methods; and (iii) participants preferred BrittleStylus over pseudo-haptic (+ visual cue) methods because of not only good performance but also fluent hand movements.",
"title": "Kine-Appendage: Enhancing Freehand VR Interaction Through Transformations of Virtual Appendages",
"normalizedTitle": "Kine-Appendage: Enhancing Freehand VR Interaction Through Transformations of Virtual Appendages",
"fno": "09993764",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Avatars",
"Visualization",
"Tracking",
"Hardware",
"Haptic Interfaces",
"Performance Evaluation",
"Headphones",
"Isomorphic Typing",
"Virtual Appendage",
"Visual Kinesthetic Feedback",
"Visual Transformation"
],
"authors": [
{
"givenName": "Yang",
"surname": "Tian",
"fullName": "Yang Tian",
"affiliation": "Guangxi Key Laboratory of Multimedia Communications and Network Technology, Guangxi University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hualong",
"surname": "Bai",
"fullName": "Hualong Bai",
"affiliation": "Guangxi Key Laboratory of Multimedia Communications and Network Technology, Guangxi University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shengdong",
"surname": "Zhao",
"fullName": "Shengdong Zhao",
"affiliation": "NUS-HCI lab, National University of Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chi-Wing",
"surname": "Fu",
"fullName": "Chi-Wing Fu",
"affiliation": "Department of Computer Science and Engineering and Institute of Medical Intelligence and XR, The Chinese University of Hong Kong, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chun",
"surname": "Yu",
"fullName": "Chun Yu",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haozhao",
"surname": "Qin",
"fullName": "Haozhao Qin",
"affiliation": "Guangxi Key Laboratory of Multimedia Communications and Network Technology, Guangxi University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qiong",
"surname": "Wang",
"fullName": "Qiong Wang",
"affiliation": "Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institutes of Advanced Technology, CAS, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pheng-Ann",
"surname": "Heng",
"fullName": "Pheng-Ann Heng",
"affiliation": "Department of Computer Science and Engineering and Institute of Medical Intelligence and XR, The Chinese University of Hong Kong, Hong Kong, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2010/6821/0/05444667",
"title": "Design of a novel finger haptic interface for contact and orientation display",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444667/12OmNAQJzQf",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504744",
"title": "bioSync: Wearable haptic I/O device for synchronous kinesthetic interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504744/12OmNyKJiB6",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344697",
"title": "Perception of congruent facial and kinesthetic expressions of emotions",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344697/12OmNynJMEK",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444646",
"title": "Simplified design of haptic display by extending one-point kinesthetic feedback to multipoint tactile feedback",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444646/12OmNyvGylZ",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/02/06710112",
"title": "Effects of Kinesthetic and Cutaneous Stimulation During the Learning of a Viscous Force Field",
"doi": null,
"abstractUrl": "/journal/th/2014/02/06710112/13rRUNvgz4r",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2013/01/tth2013010013",
"title": "Comparison of Visual and Vibrotactile Feedback Methods for Seated Posture Guidance",
"doi": null,
"abstractUrl": "/journal/th/2013/01/tth2013010013/13rRUxcKzVp",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699269",
"title": "Designing Haptics: Comparing Two Virtual Reality Gloves with Respect to Realism, Performance and Comfort",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699269/19F1OMxa4Kc",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699248",
"title": "DualGaze: Addressing the Midas Touch Problem in Gaze Mediated VR Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699248/19F1R5RaLFS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798159",
"title": "Towards an Affordable Virtual Reality Solution for Cardiopulmonary Resuscitation Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798159/1cJ0OTTPhdu",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09273221",
"title": "Crowd Navigation in VR: Exploring Haptic Rendering of Collisions",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09273221/1pb9BhAe16o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09993760",
"articleId": "1JgvW1I7Gko",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09993758",
"articleId": "1Jgw1XkmIEM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JilW9Dcnny",
"name": "ttg555501-09993764s1-supp1-3230746.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09993764s1-supp1-3230746.mp4",
"extension": "mp4",
"size": "66.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JevAUCOqyc",
"doi": "10.1109/TVCG.2022.3229953",
"abstract": "Numerous patterns found in urban phenomena, such as air pollution and human mobility, can be characterized as many directed geospatial networks (geo-networks) that represent spreading processes in urban space. These geo-networks can be analyzed from multiple levels, ranging from the <italic>macro</italic>-level of summarizing all geo-networks, <italic>meso</italic>-level of comparing or summarizing parts of geo-networks, and <italic>micro</italic>-level of inspecting individual geo-networks. Most of the existing visualizations cannot support multilevel analysis well. These techniques work by: 1) showing geo-networks separately with multiple maps leads to heavy context switching costs between different maps; 2) summarizing all geo-networks into a single network can lead to the loss of individual information; 3) drawing all geo-networks onto one map might suffer from the visual scalability issue in distinguishing individual geo-networks. In this study, we propose <italic>GeoNetverse</italic>, a novel visualization technique for analyzing aggregate geo-networks from multiple levels. Inspired by metro maps, GeoNetverse balances the overview and details of the geo-networks by placing the edges shared between geo-networks in a stacked manner. To enhance the visual scalability, GeoNetverse incorporates a level-of-detail rendering, a progressive crossing minimization, and a coloring technique. A set of evaluations was conducted to evaluate GeoNetverse from multiple perspectives.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Numerous patterns found in urban phenomena, such as air pollution and human mobility, can be characterized as many directed geospatial networks (geo-networks) that represent spreading processes in urban space. These geo-networks can be analyzed from multiple levels, ranging from the <italic>macro</italic>-level of summarizing all geo-networks, <italic>meso</italic>-level of comparing or summarizing parts of geo-networks, and <italic>micro</italic>-level of inspecting individual geo-networks. Most of the existing visualizations cannot support multilevel analysis well. These techniques work by: 1) showing geo-networks separately with multiple maps leads to heavy context switching costs between different maps; 2) summarizing all geo-networks into a single network can lead to the loss of individual information; 3) drawing all geo-networks onto one map might suffer from the visual scalability issue in distinguishing individual geo-networks. In this study, we propose <italic>GeoNetverse</italic>, a novel visualization technique for analyzing aggregate geo-networks from multiple levels. Inspired by metro maps, GeoNetverse balances the overview and details of the geo-networks by placing the edges shared between geo-networks in a stacked manner. To enhance the visual scalability, GeoNetverse incorporates a level-of-detail rendering, a progressive crossing minimization, and a coloring technique. A set of evaluations was conducted to evaluate GeoNetverse from multiple perspectives.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Numerous patterns found in urban phenomena, such as air pollution and human mobility, can be characterized as many directed geospatial networks (geo-networks) that represent spreading processes in urban space. These geo-networks can be analyzed from multiple levels, ranging from the macro-level of summarizing all geo-networks, meso-level of comparing or summarizing parts of geo-networks, and micro-level of inspecting individual geo-networks. Most of the existing visualizations cannot support multilevel analysis well. These techniques work by: 1) showing geo-networks separately with multiple maps leads to heavy context switching costs between different maps; 2) summarizing all geo-networks into a single network can lead to the loss of individual information; 3) drawing all geo-networks onto one map might suffer from the visual scalability issue in distinguishing individual geo-networks. In this study, we propose GeoNetverse, a novel visualization technique for analyzing aggregate geo-networks from multiple levels. Inspired by metro maps, GeoNetverse balances the overview and details of the geo-networks by placing the edges shared between geo-networks in a stacked manner. To enhance the visual scalability, GeoNetverse incorporates a level-of-detail rendering, a progressive crossing minimization, and a coloring technique. A set of evaluations was conducted to evaluate GeoNetverse from multiple perspectives.",
"title": "Multilevel Visual Analysis of Aggregate Geo-Networks",
"normalizedTitle": "Multilevel Visual Analysis of Aggregate Geo-Networks",
"fno": "09991899",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Rendering Computer Graphics",
"Switches",
"Scalability",
"Minimization",
"Aggregates",
"Three Dimensional Displays",
"Geospatial Network",
"Multilevel Analysis",
"Information Visualization",
"Graph Drawing"
],
"authors": [
{
"givenName": "Zikun",
"surname": "Deng",
"fullName": "Zikun Deng",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shifu",
"surname": "Chen",
"fullName": "Shifu Chen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiao",
"surname": "Xie",
"fullName": "Xiao Xie",
"affiliation": "Department of Sport Science, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guodao",
"surname": "Sun",
"fullName": "Guodao Sun",
"affiliation": "College of Computer Science and Technology, Zhejiang University of Technology, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mingliang",
"surname": "Xu",
"fullName": "Mingliang Xu",
"affiliation": "School of Computer and Artificial Intelligence, Zhengzhou University, Zhengzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Di",
"surname": "Weng",
"fullName": "Di Weng",
"affiliation": "Microsoft Research Asia, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yingcai",
"surname": "Wu",
"fullName": "Yingcai Wu",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2014/4308/0/4308a307",
"title": "Automatic Geo-location Correction of Satellite Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a307/12OmNBNM96n",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720b036",
"title": "Single Frame Based Video Geo-Localisation Using Structure Projection",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720b036/12OmNvStctS",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2003/2029/0/20290244",
"title": "Geo-spatial Active Visual Surveillance on Wireless Networks",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2003/20290244/12OmNwD1q75",
"parentPublication": {
"id": "proceedings/aipr/2003/2029/0",
"title": "32nd Applied Imagery Pattern Recognition Workshop, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2016/5698/0/07907496",
"title": "A Modular Rule-Based Visual Interactive Creation of Tree-Shaped Geo-Located Networks",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2016/07907496/12OmNzlUKAW",
"parentPublication": {
"id": "proceedings/sitis/2016/5698/0",
"title": "2016 12th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sec/2021/8390/0/839000a297",
"title": "AggNet: Cost-Aware Aggregation Networks for Geo-distributed Streaming Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/sec/2021/839000a297/1B2HarRl2rC",
"parentPublication": {
"id": "proceedings/sec/2021/8390/0",
"title": "2021 IEEE/ACM Symposium on Edge Computing (SEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09935277",
"title": "Continuous Geo-Social Group Monitoring in Dynamic LBSNs",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09935277/1HYqASRP8Vq",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09917516",
"title": "Geo-Storylines: Integrating Maps into Storyline Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09917516/1HrexIf2zZe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10016752",
"title": "Convolution-enhanced Evolving Attention Networks",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10016752/1JTZY9PMISI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600c839",
"title": "TransVLAD: Multi-Scale Attention-Based Global Descriptors for Visual Geo-Localization",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600c839/1KxUYksZUs0",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/sc/5555/01/10109840",
"title": "Geo-distributed Multi-tier Workload Migration over Multi-timescale Electricity Markets",
"doi": null,
"abstractUrl": "/journal/sc/5555/01/10109840/1MGxFuKt77W",
"parentPublication": {
"id": "trans/sc",
"title": "IEEE Transactions on Services Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09987696",
"articleId": "1J7ROVH0hpu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09992117",
"articleId": "1JevBim1nIA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JgvWO0ubsY",
"name": "ttg555501-09991899s1-supp1-3229953.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09991899s1-supp1-3229953.mp4",
"extension": "mp4",
"size": "77.4 MB",
"__typename": "WebExtraType"
},
{
"id": "1JgvWjBZgUE",
"name": "ttg555501-09991899s1-supp2-3229953.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09991899s1-supp2-3229953.pdf",
"extension": "pdf",
"size": "5.39 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JevBim1nIA",
"doi": "10.1109/TVCG.2022.3230445",
"abstract": "We present a novel framework for 3D tomographic reconstruction and visualization of tomograms from noisy electron microscopy tilt-series. Our technique takes as an input aligned tilt-series from cryogenic electron microscopy and creates denoised 3D tomograms using a proximal jointly-optimized approach that iteratively performs reconstruction and denoising, relieving the users of the need to select appropriate denoising algorithms in the pre-reconstruction or post-reconstruction steps. The whole process is accelerated by exploiting parallelism on modern GPUs, and the results can be visualized immediately after the reconstruction using volume rendering tools incorporated in the framework. We show that our technique can be used with multiple combinations of reconstruction algorithms and regularizers, thanks to the flexibility provided by proximal algorithms. Additionally, the reconstruction framework is open-source and can be easily extended with additional reconstruction and denoising methods. Furthermore, our approach enables visualization of reconstruction error throughout the iterative process within the reconstructed tomogram and on projection planes of the input tilt-series. We evaluate our approach in comparison with state-of-the-art approaches and additionally show how our error visualization can be used for reconstruction evaluation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel framework for 3D tomographic reconstruction and visualization of tomograms from noisy electron microscopy tilt-series. Our technique takes as an input aligned tilt-series from cryogenic electron microscopy and creates denoised 3D tomograms using a proximal jointly-optimized approach that iteratively performs reconstruction and denoising, relieving the users of the need to select appropriate denoising algorithms in the pre-reconstruction or post-reconstruction steps. The whole process is accelerated by exploiting parallelism on modern GPUs, and the results can be visualized immediately after the reconstruction using volume rendering tools incorporated in the framework. We show that our technique can be used with multiple combinations of reconstruction algorithms and regularizers, thanks to the flexibility provided by proximal algorithms. Additionally, the reconstruction framework is open-source and can be easily extended with additional reconstruction and denoising methods. Furthermore, our approach enables visualization of reconstruction error throughout the iterative process within the reconstructed tomogram and on projection planes of the input tilt-series. We evaluate our approach in comparison with state-of-the-art approaches and additionally show how our error visualization can be used for reconstruction evaluation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel framework for 3D tomographic reconstruction and visualization of tomograms from noisy electron microscopy tilt-series. Our technique takes as an input aligned tilt-series from cryogenic electron microscopy and creates denoised 3D tomograms using a proximal jointly-optimized approach that iteratively performs reconstruction and denoising, relieving the users of the need to select appropriate denoising algorithms in the pre-reconstruction or post-reconstruction steps. The whole process is accelerated by exploiting parallelism on modern GPUs, and the results can be visualized immediately after the reconstruction using volume rendering tools incorporated in the framework. We show that our technique can be used with multiple combinations of reconstruction algorithms and regularizers, thanks to the flexibility provided by proximal algorithms. Additionally, the reconstruction framework is open-source and can be easily extended with additional reconstruction and denoising methods. Furthermore, our approach enables visualization of reconstruction error throughout the iterative process within the reconstructed tomogram and on projection planes of the input tilt-series. We evaluate our approach in comparison with state-of-the-art approaches and additionally show how our error visualization can be used for reconstruction evaluation.",
"title": "GPU Accelerated 3D Tomographic Reconstruction and Visualization from Noisy Electron Microscopy Tilt-Series",
"normalizedTitle": "GPU Accelerated 3D Tomographic Reconstruction and Visualization from Noisy Electron Microscopy Tilt-Series",
"fno": "09992117",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Image Reconstruction",
"Noise Reduction",
"Uncertainty",
"Three Dimensional Displays",
"Iterative Methods",
"Visualization",
"Electron Microscopy",
"Tomographic Reconstruction",
"Electron Tomography",
"Tilt Series",
"Visualization",
"Cryo ET",
"GPU Acceleration"
],
"authors": [
{
"givenName": "Julio Rey",
"surname": "Ramirez",
"fullName": "Julio Rey Ramirez",
"affiliation": "Visual Computing Center at King Abdullah University of Science and Technology (KAUST), Thuwal, Kingdom of Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Rautek",
"fullName": "Peter Rautek",
"affiliation": "Visual Computing Center at King Abdullah University of Science and Technology (KAUST), Thuwal, Kingdom of Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ciril",
"surname": "Bohak",
"fullName": "Ciril Bohak",
"affiliation": "Visual Computing Center at King Abdullah University of Science and Technology (KAUST), Thuwal, Kingdom of Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ondřej",
"surname": "Strnad",
"fullName": "Ondřej Strnad",
"affiliation": "Visual Computing Center at King Abdullah University of Science and Technology (KAUST), Thuwal, Kingdom of Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zheyuan",
"surname": "Zhang",
"fullName": "Zheyuan Zhang",
"affiliation": "School of Life Sciences, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sai",
"surname": "Li",
"fullName": "Sai Li",
"affiliation": "School of Life Sciences, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ivan",
"surname": "Viola",
"fullName": "Ivan Viola",
"affiliation": "Visual Computing Center at King Abdullah University of Science and Technology (KAUST), Thuwal, Kingdom of Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wolfgang",
"surname": "Heidrich",
"fullName": "Wolfgang Heidrich",
"affiliation": "Visual Computing Center at King Abdullah University of Science and Technology (KAUST), Thuwal, Kingdom of Saudi Arabia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1998/8821/2/882120706",
"title": "Reconstruction problems in 3D for viral cryo electron microscopy",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1998/882120706/12OmNBpEeWP",
"parentPublication": {
"id": "proceedings/icip/1998/8821/3",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2017/0733/0/0733b084",
"title": "Multi-Resolution Data Fusion for Super-Resolution Electron Microscopy",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733b084/12OmNqNXEsL",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1988/9999/0/00196730",
"title": "3-D electron microscopy and incomplete angular coverage: a restoration scheme based on projections onto convex sets",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00196730/12OmNvTTcat",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118248",
"title": "A multiresolution approach to the 3D reconstruction of a 50S ribosome from an EM-tilt series solving the alignment problem without gold particles (electron microscopy)",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118248/12OmNxQOjGj",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2015/8562/0/07439468",
"title": "Automatically pick fiducial markers in electron tomography tilt images",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2015/07439468/12OmNzcxZmo",
"parentPublication": {
"id": "proceedings/iciibms/2015/8562/0",
"title": "2015 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/1/81831173",
"title": "Three-dimensional reconstruction of noisy electron microscopy virus particle images",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831173/12OmNzn391t",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2017/6664/0/08279738",
"title": "Semi-automatically aligned tilt images in electron tomography",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2017/08279738/12OmNzuIjlr",
"parentPublication": {
"id": "proceedings/iciibms/2017/6664/0",
"title": "2017 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlhpc/2018/0180/0/08638633",
"title": "Automated Labeling of Electron Microscopy Images Using Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/mlhpc/2018/08638633/18jXU8u0DVS",
"parentPublication": {
"id": "proceedings/mlhpc/2018/0180/0",
"title": "2018 IEEE/ACM Machine Learning in HPC Environments (MLHPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200c662",
"title": "A Hybrid Frequency-Spatial Domain Model for Sparse Image Reconstruction in Scanning Transmission Electron Microscopy",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c662/1BmIoiM5Xag",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsd/2022/7404/0/740400a577",
"title": "CELR: Cloud Enhanced Local Reconstruction from low-dose sparse Scanning Electron Microscopy images",
"doi": null,
"abstractUrl": "/proceedings-article/dsd/2022/740400a577/1JF8h2TYLny",
"parentPublication": {
"id": "proceedings/dsd/2022/7404/0",
"title": "2022 25th Euromicro Conference on Digital System Design (DSD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09991899",
"articleId": "1JevAUCOqyc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09992151",
"articleId": "1JevBLSiUqA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JgvZOXkwRq",
"name": "ttg555501-09992117s1-supp1-3230445.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09992117s1-supp1-3230445.pdf",
"extension": "pdf",
"size": "9.68 MB",
"__typename": "WebExtraType"
},
{
"id": "1JgvZonQFUY",
"name": "ttg555501-09992117s1-supp2-3230445.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09992117s1-supp2-3230445.mp4",
"extension": "mp4",
"size": "80.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JevCrH10vS",
"doi": "10.1109/TVCG.2022.3230369",
"abstract": "Textureless objects, repetitive patterns and limited computational resources pose significant challenges to man-made structure reconstruction from images, because feature-points-based reconstruction methods usually fail due to the lack of distinct texture or ambiguous point matches. Meanwhile multi-view stereo approaches also suffer from high computational complexity. In this paper, we present a new framework to reconstruct 3D surfaces for buildings from multi-view images by leveraging another fundamental geometric primitive: line segments. To this end, we first propose a new multi-resolution line segment detector to extract 2D line segments from each image. Then, we construct a 3D line cloud by introducing an improved Line3D++ algorithm to match 2D line segments from different images. Finally, we reconstruct a complete and manifold surface mesh from 3D line segments by formulating a <italic>Bayesian probabilistic modeling problem</italic>, which accurately generates a set of underlying planes. This output model is simple and has low performance requirements for hardware devices. Experimental results demonstrate the validity of the proposed approach and its ability to generate abstract and compact surface meshes from the 3D line cloud with low computational costs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Textureless objects, repetitive patterns and limited computational resources pose significant challenges to man-made structure reconstruction from images, because feature-points-based reconstruction methods usually fail due to the lack of distinct texture or ambiguous point matches. Meanwhile multi-view stereo approaches also suffer from high computational complexity. In this paper, we present a new framework to reconstruct 3D surfaces for buildings from multi-view images by leveraging another fundamental geometric primitive: line segments. To this end, we first propose a new multi-resolution line segment detector to extract 2D line segments from each image. Then, we construct a 3D line cloud by introducing an improved Line3D++ algorithm to match 2D line segments from different images. Finally, we reconstruct a complete and manifold surface mesh from 3D line segments by formulating a <italic>Bayesian probabilistic modeling problem</italic>, which accurately generates a set of underlying planes. This output model is simple and has low performance requirements for hardware devices. Experimental results demonstrate the validity of the proposed approach and its ability to generate abstract and compact surface meshes from the 3D line cloud with low computational costs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Textureless objects, repetitive patterns and limited computational resources pose significant challenges to man-made structure reconstruction from images, because feature-points-based reconstruction methods usually fail due to the lack of distinct texture or ambiguous point matches. Meanwhile multi-view stereo approaches also suffer from high computational complexity. In this paper, we present a new framework to reconstruct 3D surfaces for buildings from multi-view images by leveraging another fundamental geometric primitive: line segments. To this end, we first propose a new multi-resolution line segment detector to extract 2D line segments from each image. Then, we construct a 3D line cloud by introducing an improved Line3D++ algorithm to match 2D line segments from different images. Finally, we reconstruct a complete and manifold surface mesh from 3D line segments by formulating a Bayesian probabilistic modeling problem, which accurately generates a set of underlying planes. This output model is simple and has low performance requirements for hardware devices. Experimental results demonstrate the validity of the proposed approach and its ability to generate abstract and compact surface meshes from the 3D line cloud with low computational costs.",
"title": "Line-Based 3D Building Abstraction and Polygonal Surface Reconstruction From Images",
"normalizedTitle": "Line-Based 3D Building Abstraction and Polygonal Surface Reconstruction From Images",
"fno": "09992129",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Image Reconstruction",
"Surface Reconstruction",
"Image Segmentation",
"Solid Modeling",
"Buildings",
"Point Cloud Compression",
"3 D Reconstruction",
"3 D Line Cloud",
"Scene Abstraction",
"Polygonal Mesh Model"
],
"authors": [
{
"givenName": "Jianwei",
"surname": "Guo",
"fullName": "Jianwei Guo",
"affiliation": "Institute of Automation, Chinese Academy of Sciences, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yanchao",
"surname": "Liu",
"fullName": "Yanchao Liu",
"affiliation": "Shenzhen Key Laboratory of Visual Computing and Analytics (VisuCA), Shenzhen Institute of Advanced Technology (SIAT), Chinese Academy of Sciences, Shenzhen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xin",
"surname": "Song",
"fullName": "Xin Song",
"affiliation": "Shenzhen Key Laboratory of Visual Computing and Analytics (VisuCA), Shenzhen Institute of Advanced Technology (SIAT), Chinese Academy of Sciences, Shenzhen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haoyu",
"surname": "Liu",
"fullName": "Haoyu Liu",
"affiliation": "Shenzhen Key Laboratory of Visual Computing and Analytics (VisuCA), Shenzhen Institute of Advanced Technology (SIAT), Chinese Academy of Sciences, Shenzhen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaopeng",
"surname": "Zhang",
"fullName": "Xiaopeng Zhang",
"affiliation": "Institute of Automation, Chinese Academy of Sciences, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhanglin",
"surname": "Cheng",
"fullName": "Zhanglin Cheng",
"affiliation": "Shenzhen Key Laboratory of Visual Computing and Analytics (VisuCA), Shenzhen Institute of Advanced Technology (SIAT), Chinese Academy of Sciences, Shenzhen, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2015/8332/0/8332a264",
"title": "3D Surface Reconstruction from Point-and-Line Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a264/12OmNrAMEVf",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460427",
"title": "Shape reconstruction with globally-optimized surface point selection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460427/12OmNwtEEGb",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/189P2A39",
"title": "Schematic surface reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/189P2A39/12OmNyNzhy9",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f631",
"title": "Adaptive Surface Reconstruction with Multiscale Convolutional Kernels",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f631/1BmI5bvaltC",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09839681",
"title": "SSRNet: Scalable 3D Surface Reconstruction Network",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09839681/1FisL8u19du",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g292",
"title": "POCO: Point Convolution for Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g292/1H0KAZrauEo",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10093999",
"title": "ANISE: Assembly-based Neural Implicit Surface rEconstruction",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10093999/1M80HueHnJS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a553",
"title": "Surface Reconstruction from 3D Line Segments",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a553/1ezRDwCMQNO",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0122",
"title": "Deep Geometric Prior for Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0122/1gyrJvG8Kt2",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b432",
"title": "Efficiently Distributed Watertight Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b432/1zWE7llklAA",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09992151",
"articleId": "1JevBLSiUqA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09993760",
"articleId": "1JgvW1I7Gko",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JilX29jKhO",
"name": "ttg555501-09992129s1-supp1-3230369.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09992129s1-supp1-3230369.pdf",
"extension": "pdf",
"size": "10.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JevBLSiUqA",
"doi": "10.1109/TVCG.2022.3230541",
"abstract": "We present a learning-based approach for generating 3D facial animations with the motion style of a specific subject from arbitrary audio inputs. The subject style is learned from a video clip (1-2 minutes) either downloaded from the Internet or captured through an ordinary camera. Traditional methods often require many hours of the subject's video to learn a robust audio-driven model and are thus unsuitable for this task. Recent research efforts aim to train a model from video collections of a few subjects but ignore the discrimination between the subject style and underlying speech content within facial motions, leading to inaccurate style or articulation. To solve the problem, we propose a novel framework that disentangles subject-specific style and speech content from facial motions. The disentanglement is enabled by two novel training mechanisms. One is two-pass style swapping between two random subjects, and the other is joint training of the decomposition network and audio-to-motion network with a shared decoder. After training, the disentangled style is combined with arbitrary audio inputs to generate stylized audio-driven 3D facial animations. Compared with start-of-the-art methods, our approach achieves better results qualitatively and quantitatively, especially in difficult cases like bilabial plosive and bilabial nasal phonemes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a learning-based approach for generating 3D facial animations with the motion style of a specific subject from arbitrary audio inputs. The subject style is learned from a video clip (1-2 minutes) either downloaded from the Internet or captured through an ordinary camera. Traditional methods often require many hours of the subject's video to learn a robust audio-driven model and are thus unsuitable for this task. Recent research efforts aim to train a model from video collections of a few subjects but ignore the discrimination between the subject style and underlying speech content within facial motions, leading to inaccurate style or articulation. To solve the problem, we propose a novel framework that disentangles subject-specific style and speech content from facial motions. The disentanglement is enabled by two novel training mechanisms. One is two-pass style swapping between two random subjects, and the other is joint training of the decomposition network and audio-to-motion network with a shared decoder. After training, the disentangled style is combined with arbitrary audio inputs to generate stylized audio-driven 3D facial animations. Compared with start-of-the-art methods, our approach achieves better results qualitatively and quantitatively, especially in difficult cases like bilabial plosive and bilabial nasal phonemes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a learning-based approach for generating 3D facial animations with the motion style of a specific subject from arbitrary audio inputs. The subject style is learned from a video clip (1-2 minutes) either downloaded from the Internet or captured through an ordinary camera. Traditional methods often require many hours of the subject's video to learn a robust audio-driven model and are thus unsuitable for this task. Recent research efforts aim to train a model from video collections of a few subjects but ignore the discrimination between the subject style and underlying speech content within facial motions, leading to inaccurate style or articulation. To solve the problem, we propose a novel framework that disentangles subject-specific style and speech content from facial motions. The disentanglement is enabled by two novel training mechanisms. One is two-pass style swapping between two random subjects, and the other is joint training of the decomposition network and audio-to-motion network with a shared decoder. After training, the disentangled style is combined with arbitrary audio inputs to generate stylized audio-driven 3D facial animations. Compared with start-of-the-art methods, our approach achieves better results qualitatively and quantitatively, especially in difficult cases like bilabial plosive and bilabial nasal phonemes.",
"title": "Personalized Audio-Driven 3D Facial Animation Via Style-Content Disentanglement",
"normalizedTitle": "Personalized Audio-Driven 3D Facial Animation Via Style-Content Disentanglement",
"fno": "09992151",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Training",
"Facial Animation",
"Codes",
"Decoding",
"Training Data",
"Lips",
"Audio Driven Animation",
"Facial Animation",
"Facial Motion Decomposition",
"Style Learning",
"Style Content Disentanglement"
],
"authors": [
{
"givenName": "Yujin",
"surname": "Chai",
"fullName": "Yujin Chai",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tianjia",
"surname": "Shao",
"fullName": "Tianjia Shao",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yanlin",
"surname": "Weng",
"fullName": "Yanlin Weng",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kun",
"surname": "Zhou",
"fullName": "Kun Zhou",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2014/4717/0/06890554",
"title": "Realtime speech-driven facial animation using Gaussian Mixture Models",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890554/12OmNBC8Ayh",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2008/3381/0/3381a648",
"title": "A Method for Deforming-Driven Exaggerated Facial Animation Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2008/3381a648/12OmNvTjZRQ",
"parentPublication": {
"id": "proceedings/cw/2008/3381/0",
"title": "2008 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2001/1198/0/11980209",
"title": "AUDIO DRIVEN FACIAL ANIMATION FOR AUDIO-VISUAL REALITY",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2001/11980209/12OmNwtWfEP",
"parentPublication": {
"id": "proceedings/icme/2001/1198/0",
"title": "IEEE International Conference on Multimedia and Expo, 2001. ICME 2001.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890231",
"title": "Real-time control of 3D facial animation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890231/12OmNyOHG1A",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2017/04/08046117",
"title": "Audio-Driven Laughter Behavior Controller",
"doi": null,
"abstractUrl": "/journal/ta/2017/04/08046117/13rRUwj7cnI",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/06/v1523",
"title": "Expressive Facial Animation Synthesis by Learning Speech Coarticulation and Expression Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2006/06/v1523/13rRUxASubv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/04/mcg2017040030",
"title": "Data-Driven Approach to Synthesizing Facial Animation Using Motion Capture",
"doi": null,
"abstractUrl": "/magazine/cg/2017/04/mcg2017040030/13rRUyeTVkv",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200b153",
"title": "MeshTalk: 3D Face Animation from Speech using Cross-Modality Disentanglement",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200b153/1BmKgJNr99m",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049691",
"title": "Emotional Voice Puppetry",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049691/1KYouSCDkQM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2019/4637/0/463700a115",
"title": "Investigating Emotion Style in Human Faces and Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2019/463700a115/1fHHrDla9z2",
"parentPublication": {
"id": "proceedings/sbgames/2019/4637/0",
"title": "2019 18th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09992117",
"articleId": "1JevBim1nIA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09992129",
"articleId": "1JevCrH10vS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Jgw1hWkUNy",
"name": "ttg555501-09992151s1-supp2-3230541.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09992151s1-supp2-3230541.mp4",
"extension": "mp4",
"size": "212 B",
"__typename": "WebExtraType"
},
{
"id": "1Jgw1nTlrTq",
"name": "ttg555501-09992151s1-supp1-3230541.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09992151s1-supp1-3230541.mp4",
"extension": "mp4",
"size": "458 B",
"__typename": "WebExtraType"
},
{
"id": "1Jgw1l7TJks",
"name": "ttg555501-09992151s1-supp3-3230541.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09992151s1-supp3-3230541.mp4",
"extension": "mp4",
"size": "268 B",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1J7ROVH0hpu",
"doi": "10.1109/TVCG.2022.3229354",
"abstract": "In this paper, we present new quality metrics for symmetric graph drawing based on group theory. Roughly speaking, the new metrics are <italic>faithfulness</italic> metrics, i.e., they measure how faithfully a drawing of a graph displays the ground truth (i.e., geometric automorphisms) of the graph as symmetries. More specifically, we introduce two types of automorphism faithfulness metrics for displaying: (1) a single geometric automorphism as a symmetry (<italic>axial</italic> or <italic>rotational</italic>), and (2) a group of geometric automorphisms (<italic>cyclic</italic> or <italic>dihedral</italic>). We present algorithms to compute the automorphism faithfulness metrics in <inline-formula><tex-math notation=\"LaTeX\">Z_$O(n \\log n)$_Z</tex-math></inline-formula> time. Moreover, we also present efficient algorithms to detect <italic>exact</italic> symmetries in a graph drawing. We then validate our automorphism faithfulness metrics using deformation experiments. Finally, we use the metrics to evaluate existing graph drawing algorithms to compare how faithfully they display geometric automorphisms of a graph as symmetries.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present new quality metrics for symmetric graph drawing based on group theory. Roughly speaking, the new metrics are <italic>faithfulness</italic> metrics, i.e., they measure how faithfully a drawing of a graph displays the ground truth (i.e., geometric automorphisms) of the graph as symmetries. More specifically, we introduce two types of automorphism faithfulness metrics for displaying: (1) a single geometric automorphism as a symmetry (<italic>axial</italic> or <italic>rotational</italic>), and (2) a group of geometric automorphisms (<italic>cyclic</italic> or <italic>dihedral</italic>). We present algorithms to compute the automorphism faithfulness metrics in <inline-formula><tex-math notation=\"LaTeX\">$O(n \\log n)$</tex-math></inline-formula> time. Moreover, we also present efficient algorithms to detect <italic>exact</italic> symmetries in a graph drawing. We then validate our automorphism faithfulness metrics using deformation experiments. Finally, we use the metrics to evaluate existing graph drawing algorithms to compare how faithfully they display geometric automorphisms of a graph as symmetries.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present new quality metrics for symmetric graph drawing based on group theory. Roughly speaking, the new metrics are faithfulness metrics, i.e., they measure how faithfully a drawing of a graph displays the ground truth (i.e., geometric automorphisms) of the graph as symmetries. More specifically, we introduce two types of automorphism faithfulness metrics for displaying: (1) a single geometric automorphism as a symmetry (axial or rotational), and (2) a group of geometric automorphisms (cyclic or dihedral). We present algorithms to compute the automorphism faithfulness metrics in - time. Moreover, we also present efficient algorithms to detect exact symmetries in a graph drawing. We then validate our automorphism faithfulness metrics using deformation experiments. Finally, we use the metrics to evaluate existing graph drawing algorithms to compare how faithfully they display geometric automorphisms of a graph as symmetries.",
"title": "Automorphism Faithfulness Metrics for Symmetric Graph Drawings",
"normalizedTitle": "Automorphism Faithfulness Metrics for Symmetric Graph Drawings",
"fno": "09987696",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Measurement",
"Graph Drawing",
"Orbits",
"Vehicle Dynamics",
"Time Measurement",
"Stress Measurement",
"Stress",
"Automorphism",
"Faithfulness Metrics",
"Graph Drawing",
"Symmetry"
],
"authors": [
{
"givenName": "A.",
"surname": "Meidiana",
"fullName": "A. Meidiana",
"affiliation": "University of Sydney",
"__typename": "ArticleAuthorType"
},
{
"givenName": "S.-H.",
"surname": "Hong",
"fullName": "S.-H. Hong",
"affiliation": "University of Sydney",
"__typename": "ArticleAuthorType"
},
{
"givenName": "P.",
"surname": "Eades",
"fullName": "P. Eades",
"affiliation": "University of Sydney",
"__typename": "ArticleAuthorType"
},
{
"givenName": "D.",
"surname": "Keim",
"fullName": "D. Keim",
"affiliation": "University of Konstanz",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccd/2004/2231/0/22310417",
"title": "Graph Automorphism-Based Algorithm for Determining Symmetric Inputs",
"doi": null,
"abstractUrl": "/proceedings-article/iccd/2004/22310417/12OmNBSSVdc",
"parentPublication": {
"id": "proceedings/iccd/2004/2231/0",
"title": "IEEE International Conference on Computer Design: VLSI in Computers and Processors, 2004. ICCD 2004. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2013/4797/0/06596147",
"title": "On the faithfulness of graph visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2013/06596147/12OmNwCJON7",
"parentPublication": {
"id": "proceedings/pacificvis/2013/4797/0",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2017/5738/0/08031607",
"title": "dNNG: Quality metrics and layout for neighbourhood faithfulness",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2017/08031607/12OmNxvwoZW",
"parentPublication": {
"id": "proceedings/pacificvis/2017/5738/0",
"title": "2017 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2019/02/08353712",
"title": "On the Strength of Privacy Metrics for Vehicular Communication",
"doi": null,
"abstractUrl": "/journal/tm/2019/02/08353712/17D45WZZ7Eb",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2022/2335/0/233500a051",
"title": "dGG, dRNG, DSC: New Degree-based Shape-based Faithfulness Metrics for Large and Complex Graph Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2022/233500a051/1E2wj8PptTi",
"parentPublication": {
"id": "proceedings/pacificvis/2022/2335/0",
"title": "2022 IEEE 15th Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10005087",
"title": "SubLinearForce: Fully Sublinear-Time Force Computation for Large Complex Graph Drawing",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10005087/1JC5yDf0E5q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a021",
"title": "Clustering Ensemble-based Edge Bundling to Improve the Readability of Graph Drawings",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a021/1KaH6ONvwzK",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2022/01/09035391",
"title": "Using Metrics Suites to Improve the Measurement of Privacy in Graphs",
"doi": null,
"abstractUrl": "/journal/tq/2022/01/09035391/1iaePndvbS8",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2020/5697/0/09086242",
"title": "Quality Metrics for Symmetric Graph Drawings",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2020/09086242/1kuHo2UqiZ2",
"parentPublication": {
"id": "proceedings/pacificvis/2020/5697/0",
"title": "2020 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2022/06/09321740",
"title": "Within-Project Defect Prediction of Infrastructure-as-Code Using Product and Process Metrics",
"doi": null,
"abstractUrl": "/journal/ts/2022/06/09321740/1qmbqXUNfWw",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09984953",
"articleId": "1J6d2SwfUT6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09991899",
"articleId": "1JevAUCOqyc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1J6d2C45zpe",
"doi": "10.1109/TVCG.2022.3229017",
"abstract": "We present V-Mail, a framework of cross-platform applications, interactive techniques, and communication protocols for improved multi-person correspondence about spatial 3D datasets. Inspired by the daily use of e-mail, V-Mail seeks to enable a similar style of rapid, multi-person communication accessible on any device; however, it aims to do this in the new context of spatial 3D communication, where limited access to 3D graphics hardware typically prevents such communication. The approach integrates visual data storytelling with data exploration, spatial annotations, and animated transitions. V-Mail “data stories” are exported in a standard video file format to establish a common baseline level of access on (almost) any device. The V-Mail framework also includes a series of complementary client applications and plugins that enable different degrees of story co-authoring and data exploration, adjusted automatically to match the capabilities of various devices. A lightweight, phone-based V-Mail app makes it possible to annotate data by adding captions to the video. These spatial annotations are then immediately accessible to team members running high-end 3D graphics visualization systems that also include a V-Mail client, implemented as a plugin. Results and evaluation from applying V-Mail to assist communication within an interdisciplinary science team studying Antarctic ice sheets confirm the utility of the asynchronous, cross-platform collaborative framework while also highlighting some current limitations and opportunities for future work.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present V-Mail, a framework of cross-platform applications, interactive techniques, and communication protocols for improved multi-person correspondence about spatial 3D datasets. Inspired by the daily use of e-mail, V-Mail seeks to enable a similar style of rapid, multi-person communication accessible on any device; however, it aims to do this in the new context of spatial 3D communication, where limited access to 3D graphics hardware typically prevents such communication. The approach integrates visual data storytelling with data exploration, spatial annotations, and animated transitions. V-Mail “data stories” are exported in a standard video file format to establish a common baseline level of access on (almost) any device. The V-Mail framework also includes a series of complementary client applications and plugins that enable different degrees of story co-authoring and data exploration, adjusted automatically to match the capabilities of various devices. A lightweight, phone-based V-Mail app makes it possible to annotate data by adding captions to the video. These spatial annotations are then immediately accessible to team members running high-end 3D graphics visualization systems that also include a V-Mail client, implemented as a plugin. Results and evaluation from applying V-Mail to assist communication within an interdisciplinary science team studying Antarctic ice sheets confirm the utility of the asynchronous, cross-platform collaborative framework while also highlighting some current limitations and opportunities for future work.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present V-Mail, a framework of cross-platform applications, interactive techniques, and communication protocols for improved multi-person correspondence about spatial 3D datasets. Inspired by the daily use of e-mail, V-Mail seeks to enable a similar style of rapid, multi-person communication accessible on any device; however, it aims to do this in the new context of spatial 3D communication, where limited access to 3D graphics hardware typically prevents such communication. The approach integrates visual data storytelling with data exploration, spatial annotations, and animated transitions. V-Mail “data stories” are exported in a standard video file format to establish a common baseline level of access on (almost) any device. The V-Mail framework also includes a series of complementary client applications and plugins that enable different degrees of story co-authoring and data exploration, adjusted automatically to match the capabilities of various devices. A lightweight, phone-based V-Mail app makes it possible to annotate data by adding captions to the video. These spatial annotations are then immediately accessible to team members running high-end 3D graphics visualization systems that also include a V-Mail client, implemented as a plugin. Results and evaluation from applying V-Mail to assist communication within an interdisciplinary science team studying Antarctic ice sheets confirm the utility of the asynchronous, cross-platform collaborative framework while also highlighting some current limitations and opportunities for future work.",
"title": "V-Mail: 3D-Enabled Correspondence about Spatial Data on (Almost) All Your Devices",
"normalizedTitle": "V-Mail: 3D-Enabled Correspondence about Spatial Data on (Almost) All Your Devices",
"fno": "09984928",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Three Dimensional Displays",
"Annotations",
"Spatial Databases",
"Collaboration",
"Software",
"Task Analysis",
"Human Computer Interaction",
"Visualization Of Scientific 3 D Data",
"Communication",
"Storytelling",
"Immersive Analytics"
],
"authors": [
{
"givenName": "Jung Who",
"surname": "Nam",
"fullName": "Jung Who Nam",
"affiliation": "University of Minnesota, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tobias",
"surname": "Isenberg",
"fullName": "Tobias Isenberg",
"affiliation": "Université Paris-Saclay, CNRS, Inria, LISN, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel F.",
"surname": "Keefe",
"fullName": "Daniel F. Keefe",
"affiliation": "University of Minnesota, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2014/6227/0/07042491",
"title": "YMCA — Your mesh comparison application",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042491/12OmNAndiiu",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200i341",
"title": "Unsupervised Dense Deformation Embedding Network for Template-Free Shape Correspondence",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200i341/1BmGLPmnLOw",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200p5933",
"title": "Interpretation of Emergent Communication in Heterogeneous Collaborative Embodied Agents",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5933/1BmI3JWIO7C",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200b888",
"title": "Weakly Supervised Relative Spatial Reasoning for Visual Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200b888/1BmJsltM1s4",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049700",
"title": "Using Virtual Replicas to Improve Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049700/1KYoAxyw5c4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a536",
"title": "SC6D: Symmetry-agnostic and Correspondence-free 6D Object Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a536/1KYsuTJMnIc",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a240",
"title": "V-NAS: Neural Architecture Search for Volumetric Medical Image Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a240/1ezREtapD2M",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a393",
"title": "Wearable RemoteFusion: A Mixed Reality Remote Collaboration System with Local Eye Gaze and Remote Hand Gesture Sharing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a393/1gysjIlsYus",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "letters/ca/2020/01/09086777",
"title": "Heterogeneous 3D Integration for a RISC-V System With STT-MRAM",
"doi": null,
"abstractUrl": "/journal/ca/2020/01/09086777/1keqGxRHR7y",
"parentPublication": {
"id": "letters/ca",
"title": "IEEE Computer Architecture Letters",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/04/09437800",
"title": "Collaborative VR-Based 3D Labeling of Live-Captured Scenes by Remote Users",
"doi": null,
"abstractUrl": "/magazine/cg/2021/04/09437800/1tL6FQbaHG8",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09983833",
"articleId": "1J4xXB39h96",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09984953",
"articleId": "1J6d2SwfUT6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1J7RO3ei640",
"name": "ttg555501-09984928s1-tvcg-3229017-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09984928s1-tvcg-3229017-mm.zip",
"extension": "zip",
"size": "280 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1J6d2SwfUT6",
"doi": "10.1109/TVCG.2022.3229023",
"abstract": "High-quality visualization collections are beneficial for a variety of applications including visualization reference and data-driven visualization design. The visualization community has created many visualization collections, and developed interactive exploration systems for the collections. However, the systems are mainly based on extrinsic attributes like authors and publication years, whilst neglect intrinsic property (<italic>i.e</italic>., visual appearance) of visualizations, hindering visual comparison and query of visualization designs. This paper presents <italic>VISAtlas</italic>, an image-based approach empowered by neural image embedding, to facilitate exploration and query for visualization collections. To improve embedding accuracy, we create a comprehensive collection of synthetic and real-world visualizations, and use it to train a convolutional neural network (CNN) model with a triplet loss for taxonomical classification of visualizations. Next, we design a coordinated multiple view (CMV) system that enables multi-perspective exploration and design retrieval based on visualization embeddings. Specifically, we design a novel embedding overview that leverages contextual layout framework to preserve the context of the embedding vectors with the associated visualization taxonomies, and density plot and sampling techniques to address the overdrawing problem. We demonstrate in three case studies and one user study the effectiveness of <italic>VISAtlas</italic> in supporting comparative analysis of visualization collections, exploration of composite visualizations, and image-based retrieval of visualization designs. 
The studies reveal that real-world visualization collections (<italic>e.g</italic>., Beagle and VIS30K) better accord with the richness and diversity of visualization designs than synthetic collections (<italic>e.g</italic>., Data2Vis), inspiring composite visualizations are identified in real-world collections, and distinct design patterns exist in visualizations from different sources.",
"abstracts": [
{
"abstractType": "Regular",
"content": "High-quality visualization collections are beneficial for a variety of applications including visualization reference and data-driven visualization design. The visualization community has created many visualization collections, and developed interactive exploration systems for the collections. However, the systems are mainly based on extrinsic attributes like authors and publication years, whilst neglect intrinsic property (<italic>i.e</italic>., visual appearance) of visualizations, hindering visual comparison and query of visualization designs. This paper presents <italic>VISAtlas</italic>, an image-based approach empowered by neural image embedding, to facilitate exploration and query for visualization collections. To improve embedding accuracy, we create a comprehensive collection of synthetic and real-world visualizations, and use it to train a convolutional neural network (CNN) model with a triplet loss for taxonomical classification of visualizations. Next, we design a coordinated multiple view (CMV) system that enables multi-perspective exploration and design retrieval based on visualization embeddings. Specifically, we design a novel embedding overview that leverages contextual layout framework to preserve the context of the embedding vectors with the associated visualization taxonomies, and density plot and sampling techniques to address the overdrawing problem. We demonstrate in three case studies and one user study the effectiveness of <italic>VISAtlas</italic> in supporting comparative analysis of visualization collections, exploration of composite visualizations, and image-based retrieval of visualization designs. 
The studies reveal that real-world visualization collections (<italic>e.g</italic>., Beagle and VIS30K) better accord with the richness and diversity of visualization designs than synthetic collections (<italic>e.g</italic>., Data2Vis), inspiring composite visualizations are identified in real-world collections, and distinct design patterns exist in visualizations from different sources.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "High-quality visualization collections are beneficial for a variety of applications including visualization reference and data-driven visualization design. The visualization community has created many visualization collections, and developed interactive exploration systems for the collections. However, the systems are mainly based on extrinsic attributes like authors and publication years, whilst neglect intrinsic property (i.e., visual appearance) of visualizations, hindering visual comparison and query of visualization designs. This paper presents VISAtlas, an image-based approach empowered by neural image embedding, to facilitate exploration and query for visualization collections. To improve embedding accuracy, we create a comprehensive collection of synthetic and real-world visualizations, and use it to train a convolutional neural network (CNN) model with a triplet loss for taxonomical classification of visualizations. Next, we design a coordinated multiple view (CMV) system that enables multi-perspective exploration and design retrieval based on visualization embeddings. Specifically, we design a novel embedding overview that leverages contextual layout framework to preserve the context of the embedding vectors with the associated visualization taxonomies, and density plot and sampling techniques to address the overdrawing problem. We demonstrate in three case studies and one user study the effectiveness of VISAtlas in supporting comparative analysis of visualization collections, exploration of composite visualizations, and image-based retrieval of visualization designs. The studies reveal that real-world visualization collections (e.g., Beagle and VIS30K) better accord with the richness and diversity of visualization designs than synthetic collections (e.g., Data2Vis), inspiring composite visualizations are identified in real-world collections, and distinct design patterns exist in visualizations from different sources.",
"title": "VISAtlas: An Image-based Exploration and Query System for Large Visualization Collections via Neural Image Embedding",
"normalizedTitle": "VISAtlas: An Image-based Exploration and Query System for Large Visualization Collections via Neural Image Embedding",
"fno": "09984953",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Feature Extraction",
"Task Analysis",
"Layout",
"Taxonomy",
"Semantics",
"Visualization Collection",
"Image Embedding",
"Visual Query",
"Image Visualization",
"Design Pattern"
],
"authors": [
{
"givenName": "Yilin",
"surname": "Ye",
"fullName": "Yilin Ye",
"affiliation": "Hong Kong University of Science and Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rong",
"surname": "Huang",
"fullName": "Rong Huang",
"affiliation": "Hong Kong University of Science and Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Zeng",
"fullName": "Wei Zeng",
"affiliation": "Hong Kong University of Science and Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2008/3268/0/3268a246",
"title": "Coordinated and Multiple Views for Visualizing Text Collections",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a246/12OmNzBwGtI",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2010/05/mcg2010050032",
"title": "Newdle: Interactive Visual Exploration of Large Online News Collections",
"doi": null,
"abstractUrl": "/magazine/cg/2010/05/mcg2010050032/13rRUEgariC",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08358974",
"title": "A Semantic-Based Method for Visualizing Large Image Collections",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08358974/13rRUxC0SP1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904442",
"title": "Communicating Uncertainty in Digital Humanities Visualization Research",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904442/1H1gpt871W8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09916137",
"title": "Revisiting the Design Patterns of Composite Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09916137/1HojAjSAGNq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/11/08713940",
"title": "Embedding Meta Information into Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2020/11/08713940/1a31n7yR8kM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/04/08880504",
"title": "Content-Based Visual Summarization for Image Collections",
"doi": null,
"abstractUrl": "/journal/tg/2021/04/08880504/1emyadTwt0Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/01/09039632",
"title": "Steerable Self-Driving Data Visualization",
"doi": null,
"abstractUrl": "/journal/tk/2022/01/09039632/1igS2v9G6cw",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/11/09085893",
"title": "Contextual Translation Embedding for Visual Relationship Detection and Scene Graph Generation",
"doi": null,
"abstractUrl": "/journal/tp/2021/11/09085893/1jE1GK6vdWo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552846",
"title": "A Critical Reflection on Visualization Research: Where Do Decision Making Tasks Hide?",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552846/1xibYOLsNc4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09984928",
"articleId": "1J6d2C45zpe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09987696",
"articleId": "1J7ROVH0hpu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1J7RPhWfcqY",
"name": "ttg555501-09984953s1-supp1-3229023.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09984953s1-supp1-3229023.mp4",
"extension": "mp4",
"size": "14.1 MB",
"__typename": "WebExtraType"
},
{
"id": "1J7RP9MHBM4",
"name": "ttg555501-09984953s1-supp2-3229023.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09984953s1-supp2-3229023.pdf",
"extension": "pdf",
"size": "1.22 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1J4xXB39h96",
"doi": "10.1109/TVCG.2022.3228807",
"abstract": "The delay of rendering on AR devices requires prediction of head motion using sensor data acquired tens of even one hundred milliseconds ago to avoid misalignment between the virtual content and the physical world, where the misalignment will lead to a sense of time latency and dizziness for users. To solve the problem, we propose a method for the 6DoF motion prediction to compensate for the time latency. Compared with traditional hand-crafted methods, our method is based on deep learning, which has better motion prediction ability to deal with complex human motion. In particular, we propose a MOtion UNcerTainty encode decode network (MOUNT) that estimates the uncertainty of input data and predicts the uncertainty of output motion to improve the prediction accuracy and smoothness. Experiments on the EuRoC and our collected dataset demonstrate that our method significantly outperforms the traditional method and greatly improves AR visual effects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The delay of rendering on AR devices requires prediction of head motion using sensor data acquired tens of even one hundred milliseconds ago to avoid misalignment between the virtual content and the physical world, where the misalignment will lead to a sense of time latency and dizziness for users. To solve the problem, we propose a method for the 6DoF motion prediction to compensate for the time latency. Compared with traditional hand-crafted methods, our method is based on deep learning, which has better motion prediction ability to deal with complex human motion. In particular, we propose a MOtion UNcerTainty encode decode network (MOUNT) that estimates the uncertainty of input data and predicts the uncertainty of output motion to improve the prediction accuracy and smoothness. Experiments on the EuRoC and our collected dataset demonstrate that our method significantly outperforms the traditional method and greatly improves AR visual effects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The delay of rendering on AR devices requires prediction of head motion using sensor data acquired tens of even one hundred milliseconds ago to avoid misalignment between the virtual content and the physical world, where the misalignment will lead to a sense of time latency and dizziness for users. To solve the problem, we propose a method for the 6DoF motion prediction to compensate for the time latency. Compared with traditional hand-crafted methods, our method is based on deep learning, which has better motion prediction ability to deal with complex human motion. In particular, we propose a MOtion UNcerTainty encode decode network (MOUNT) that estimates the uncertainty of input data and predicts the uncertainty of output motion to improve the prediction accuracy and smoothness. Experiments on the EuRoC and our collected dataset demonstrate that our method significantly outperforms the traditional method and greatly improves AR visual effects.",
"title": "MOUNT: Learning 6DoF Motion Prediction Based on Uncertainty Estimation for Delayed AR Rendering",
"normalizedTitle": "MOUNT: Learning 6DoF Motion Prediction Based on Uncertainty Estimation for Delayed AR Rendering",
"fno": "09983833",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Uncertainty",
"Task Analysis",
"Delays",
"Head",
"Hardware",
"Glass",
"Learning Environments",
"Learning Technologies",
"Virtual And Augmented Reality"
],
"authors": [
{
"givenName": "Haoran",
"surname": "Chen",
"fullName": "Haoran Chen",
"affiliation": "AI Innovation Center, School of Computer Science, Peking University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lantian",
"surname": "Wei",
"fullName": "Lantian Wei",
"affiliation": "Key Lab of Machine Perception (MOE), School of Intelligence Science and Technology, Peking University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haomin",
"surname": "Liu",
"fullName": "Haomin Liu",
"affiliation": "Key Lab of Machine Perception (MOE), School of Intelligence Science and Technology, Peking University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Boxin",
"surname": "Shi",
"fullName": "Boxin Shi",
"affiliation": "National Engineering Research Center of Visual Technology, School of Computer Science, Peking University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guofeng",
"surname": "Zhang",
"fullName": "Guofeng Zhang",
"affiliation": "State key lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongbin",
"surname": "Zha",
"fullName": "Hongbin Zha",
"affiliation": "Key Lab of Machine Perception (MOE), School of Intelligence Science and Technology, Peking University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2017/09/07563342",
"title": "Uncertainty Visualization by Representative Sampling from Prediction Ensembles",
"doi": null,
"abstractUrl": "/journal/tg/2017/09/07563342/13rRUIM2VH4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3117",
"title": "Estimating and Exploiting the Aleatoric Uncertainty in Surface Normal Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3117/1BmFBTOLb9u",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a690",
"title": "Lightweight Wearable AR System using Head-mounted Projector for Work Support",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a690/1J7Wqal3Fkc",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a109",
"title": "Visualization of Machine Learning Uncertainty in AR-Based See-Through Applications",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a109/1KmFcUFPF3G",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2022/5099/0/509900b275",
"title": "Uncertainty Propagation in Node Classification",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2022/509900b275/1KpCBAulk2Y",
"parentPublication": {
"id": "proceedings/icdm/2022/5099/0",
"title": "2022 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800m2011",
"title": "Uncertainty-Aware CNNs for Depth Completion: Uncertainty from Beginning to End",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800m2011/1m3nqBO2klG",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412616",
"title": "Separation of Aleatoric and Epistemic Uncertainty in Deterministic Deep Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412616/1tmiobOt10A",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a768",
"title": "Evaluating the Potential of Glanceable AR Interfaces for Authentic Everyday Uses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a768/1tuAQLvc5WM",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aitest/2021/3481/0/348100a093",
"title": "Prediction Surface Uncertainty Quantification in Object Detection Models for Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/aitest/2021/348100a093/1xH9KgUfKKs",
"parentPublication": {
"id": "proceedings/aitest/2021/3481/0",
"title": "2021 IEEE International Conference On Artificial Intelligence Testing (AITest)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a109",
"title": "Comparing Head and AR Glasses Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a109/1yeQMONGc9y",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09982378",
"articleId": "1J2T8H9Y2Ws",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09984928",
"articleId": "1J6d2C45zpe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JR6e8VH7So",
"name": "ttg555501-09983833s1-supp1-3228807.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09983833s1-supp1-3228807.mp4",
"extension": "mp4",
"size": "251 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1J2T8H9Y2Ws",
"doi": "10.1109/TVCG.2022.3228707",
"abstract": "Converting a human portrait to anime style is a desirable but challenging problem. Existing methods fail to resolve this problem due to the large inherent gap between two domains that cannot be overcome by a simple direct mapping. For this reason, these methods struggle to preserve the appearance features in the original photo. In this paper, we discover an intermediate domain, the coser portrait (portraits of humans costuming as anime characters), that helps bridge this gap. It alleviates the learning ambiguity and loosens the mapping difficulty in a progressive manner. Specifically, we start from learning the mapping between coser and anime portraits, and present a proxy-guided domain adaptation learning scheme with three progressive adaptation stages to shift the initial model to the human portrait domain. In this way, our model can generate visually pleasant anime portraits with well-preserved appearances given the human portrait. Our model adopts a disentangled design by breaking down the translation problem into two specific subtasks of face deformation and portrait stylization. This further elevates the generation quality. Extensive experimental results show that our model can achieve visually compelling translation with better appearance preservation and perform favorably against the existing methods both qualitatively and quantitatively. <italic>Our code and datasets are available at <uri>https://github.com/NeverGiveU/PDA-Translation</uri></italic>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Converting a human portrait to anime style is a desirable but challenging problem. Existing methods fail to resolve this problem due to the large inherent gap between two domains that cannot be overcome by a simple direct mapping. For this reason, these methods struggle to preserve the appearance features in the original photo. In this paper, we discover an intermediate domain, the coser portrait (portraits of humans costuming as anime characters), that helps bridge this gap. It alleviates the learning ambiguity and loosens the mapping difficulty in a progressive manner. Specifically, we start from learning the mapping between coser and anime portraits, and present a proxy-guided domain adaptation learning scheme with three progressive adaptation stages to shift the initial model to the human portrait domain. In this way, our model can generate visually pleasant anime portraits with well-preserved appearances given the human portrait. Our model adopts a disentangled design by breaking down the translation problem into two specific subtasks of face deformation and portrait stylization. This further elevates the generation quality. Extensive experimental results show that our model can achieve visually compelling translation with better appearance preservation and perform favorably against the existing methods both qualitatively and quantitatively. <italic>Our code and datasets are available at <uri>https://github.com/NeverGiveU/PDA-Translation</uri></italic>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Converting a human portrait to anime style is a desirable but challenging problem. Existing methods fail to resolve this problem due to the large inherent gap between two domains that cannot be overcome by a simple direct mapping. For this reason, these methods struggle to preserve the appearance features in the original photo. In this paper, we discover an intermediate domain, the coser portrait (portraits of humans costuming as anime characters), that helps bridge this gap. It alleviates the learning ambiguity and loosens the mapping difficulty in a progressive manner. Specifically, we start from learning the mapping between coser and anime portraits, and present a proxy-guided domain adaptation learning scheme with three progressive adaptation stages to shift the initial model to the human portrait domain. In this way, our model can generate visually pleasant anime portraits with well-preserved appearances given the human portrait. Our model adopts a disentangled design by breaking down the translation problem into two specific subtasks of face deformation and portrait stylization. This further elevates the generation quality. Extensive experimental results show that our model can achieve visually compelling translation with better appearance preservation and perform favorably against the existing methods both qualitatively and quantitatively. Our code and datasets are available at https://github.com/NeverGiveU/PDA-Translation.",
"title": "Appearance-preserved Portrait-to-anime Translation via Proxy-guided Domain Adaptation",
"normalizedTitle": "Appearance-preserved Portrait-to-anime Translation via Proxy-guided Domain Adaptation",
"fno": "09982378",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Adaptation Models",
"Training",
"Strain",
"Shape",
"Semantics",
"Faces",
"Deformable Models",
"Portrait To Anime Translation",
"Coser Portrait Proxy",
"Domain Adaptation"
],
"authors": [
{
"givenName": "Wenpeng",
"surname": "Xiao",
"fullName": "Wenpeng Xiao",
"affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Cheng",
"surname": "Xu",
"fullName": "Cheng Xu",
"affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiajie",
"surname": "Mai",
"fullName": "Jiajie Mai",
"affiliation": "King's College London, London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xuemiao",
"surname": "Xu",
"fullName": "Xuemiao Xu",
"affiliation": "Ministry of Education Key Laboratory of Big Data and Intelligent Robot and Guangdong, and State Key Laboratory of Subtropical Building Science, Provincial Key Lab of Computational Intelligence and Cyberspace Information",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yue",
"surname": "Li",
"fullName": "Yue Li",
"affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chengze",
"surname": "Li",
"fullName": "Chengze Li",
"affiliation": "Caritas Institute of Higher Education, Hong Kong SAR, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xueting",
"surname": "Liu",
"fullName": "Xueting Liu",
"affiliation": "Caritas Institute of Higher Education, Hong Kong SAR, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shengfeng",
"surname": "He",
"fullName": "Shengfeng He",
"affiliation": "School of Computing and Information Systems, Singapore Management University, Singapore",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457h426",
"title": "ROAM: A Rich Object Appearance Model with Application to Rotoscoping",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457h426/12OmNrH1PER",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09699090",
"title": "Quality Metric Guided Portrait Line Drawing Generation From Unpaired Training Data",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09699090/1ADJdtRWkNO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09852330",
"title": "Neural Modeling of Portrait Bas-relief from a Single Photograph",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09852330/1FFHdt1RWHC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eiect/2022/9956/0/995600a058",
"title": "Digital portrait model construction for telemedicine doctor authentication",
"doi": null,
"abstractUrl": "/proceedings-article/eiect/2022/995600a058/1LHcAvYlDxK",
"parentPublication": {
"id": "proceedings/eiect/2022/9956/0",
"title": "2022 2nd International Conference on Electronic Information Engineering and Computer Technology (EIECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/1/260701a352",
"title": "Purchasing Behavior Analysis Based on Customer's Data Portrait Model",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260701a352/1cYiCnOZUSQ",
"parentPublication": {
"id": "proceedings/compsac/2019/2607/1",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2019/2297/0/229700a053",
"title": "Semi-Automatic Creation of an Anime-Like 3D Face Model from a Single Illustration",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2019/229700a053/1fHklgptlIs",
"parentPublication": {
"id": "proceedings/cw/2019/2297/0",
"title": "2019 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b341",
"title": "Robust 3D Self-Portraits in Seconds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b341/1m3nzAe5fKE",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800n3515",
"title": "PuppeteerGAN: Arbitrary Portrait Animation With Semantic-Aware Appearance Transformation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800n3515/1m3opHwOOZy",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b225",
"title": "A Novel Developer Portrait Model based on Bert-Capsule Network",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b225/1t7mT1qOv3q",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0",
"title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09547845",
"title": "Exemplar-Based 3D Portrait Stylization",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09547845/1x9TLh9tiow",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09978915",
"articleId": "1IXUnNBj0Yw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09983833",
"articleId": "1J4xXB39h96",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1J9xUtHhStq",
"name": "ttg555501-09982378s1-supp1-3228707.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09982378s1-supp1-3228707.pdf",
"extension": "pdf",
"size": "10 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IXUnEM2oc8",
"doi": "10.1109/TVCG.2022.3227970",
"abstract": "With the development of 3D digital geometry technology, 3D triangular meshes are becoming more useful and valuable in industrial manufacturing and digital entertainment. A high quality triangular mesh can be used to represent a real world object with geometric and physical characteristics. While anisotropic meshes have advantages of representing shapes with sharp features (such as trimmed surfaces) more efficiently and accurately, isotropic meshes allow more numerically stable computations. When there is no anisotropic mesh requirement, isotropic triangles are always a good choice. In this paper, we propose a remeshing method to convert an input mesh into an adaptively isotropic one based on a curvature smoothed field (CSF). With the help of the CSF, adaptively isotropic remeshing can retain the curvature sensitivity, which enables more geometric features to be kept, and avoid the occurrence of obtuse triangles in the remeshed model as much as possible. The remeshed triangles with locally isotropic property benefit various geometric processes such as neighbor-based feature extraction and analysis. The experimental results show that our method achieves better balance between geometric feature preservation and mesh quality improvement compared to peers. We provide the implementation codes of our resampling method at <uri>https://github.com/vvvwo/Adaptively-Isotropic-Remeshing</uri>",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the development of 3D digital geometry technology, 3D triangular meshes are becoming more useful and valuable in industrial manufacturing and digital entertainment. A high quality triangular mesh can be used to represent a real world object with geometric and physical characteristics. While anisotropic meshes have advantages of representing shapes with sharp features (such as trimmed surfaces) more efficiently and accurately, isotropic meshes allow more numerically stable computations. When there is no anisotropic mesh requirement, isotropic triangles are always a good choice. In this paper, we propose a remeshing method to convert an input mesh into an adaptively isotropic one based on a curvature smoothed field (CSF). With the help of the CSF, adaptively isotropic remeshing can retain the curvature sensitivity, which enables more geometric features to be kept, and avoid the occurrence of obtuse triangles in the remeshed model as much as possible. The remeshed triangles with locally isotropic property benefit various geometric processes such as neighbor-based feature extraction and analysis. The experimental results show that our method achieves better balance between geometric feature preservation and mesh quality improvement compared to peers. We provide the implementation codes of our resampling method at <uri>https://github.com/vvvwo/Adaptively-Isotropic-Remeshing</uri>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the development of 3D digital geometry technology, 3D triangular meshes are becoming more useful and valuable in industrial manufacturing and digital entertainment. A high quality triangular mesh can be used to represent a real world object with geometric and physical characteristics. While anisotropic meshes have advantages of representing shapes with sharp features (such as trimmed surfaces) more efficiently and accurately, isotropic meshes allow more numerically stable computations. When there is no anisotropic mesh requirement, isotropic triangles are always a good choice. In this paper, we propose a remeshing method to convert an input mesh into an adaptively isotropic one based on a curvature smoothed field (CSF). With the help of the CSF, adaptively isotropic remeshing can retain the curvature sensitivity, which enables more geometric features to be kept, and avoid the occurrence of obtuse triangles in the remeshed model as much as possible. The remeshed triangles with locally isotropic property benefit various geometric processes such as neighbor-based feature extraction and analysis. The experimental results show that our method achieves better balance between geometric feature preservation and mesh quality improvement compared to peers. We provide the implementation codes of our resampling method at https://github.com/vvvwo/Adaptively-Isotropic-Remeshing",
"title": "Adaptively Isotropic Remeshing based on Curvature Smoothed Field",
"normalizedTitle": "Adaptively Isotropic Remeshing based on Curvature Smoothed Field",
"fno": "09978684",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Optimization",
"Faces",
"Adaptation Models",
"Aerospace Electronics",
"Three Dimensional Displays",
"Solid Modeling",
"Histograms",
"Adaptively Isotropic",
"Remeshing",
"Curvature Smoothed Field"
],
"authors": [
{
"givenName": "Chenlei",
"surname": "Lv",
"fullName": "Chenlei Lv",
"affiliation": "School of Computer Science and Engineering, Nanyang Technological University, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Weisi",
"surname": "Lin",
"fullName": "Weisi Lin",
"affiliation": "School of Computer Science and Engineering, Nanyang Technological University, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jianmin",
"surname": "Zheng",
"fullName": "Jianmin Zheng",
"affiliation": "School of Computer Science and Engineering, Nanyang Technological University, Singapore",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/isise/2009/6325/0/05447224",
"title": "A Three Dimensional Mesh Improvement Algorithm Based on Curvature Flow",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2009/05447224/12OmNBpVQ4H",
"parentPublication": {
"id": "proceedings/isise/2009/6325/0",
"title": "2009 Second International Symposium on Information Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2004/2234/0/22340207",
"title": "Direct Anisotropic Quad-Dominant Remeshing",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2004/22340207/12OmNCfjerO",
"parentPublication": {
"id": "proceedings/pg/2004/2234/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smi/2010/7259/0/05521462",
"title": "Reversely Anisotropic Quad-dominant Remeshing",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2010/05521462/12OmNywxlVm",
"parentPublication": {
"id": "proceedings/smi/2010/7259/0",
"title": "Shape Modeling International (SMI 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smi/2003/1845/0/18450049",
"title": "Isotropic Surface Remeshing",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2003/18450049/12OmNzZmZvh",
"parentPublication": {
"id": "proceedings/smi/2003/1845/0",
"title": "Shape Modeling and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08361045",
"title": "Isotropic Surface Remeshing without Large and Small Angles",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08361045/13rRUIM2VBN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2014/01/mcg2014010052",
"title": "Highly Parallel Algorithms for Visual-Perception-Guided Surface Remeshing",
"doi": null,
"abstractUrl": "/magazine/cg/2014/01/mcg2014010052/13rRUwwslv2",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/02/ttg2008020369",
"title": "Generic Remeshing of 3D Triangular Meshes with Metric-Dependent Discrete Voronoi Diagrams",
"doi": null,
"abstractUrl": "/journal/tg/2008/02/ttg2008020369/13rRUxBJhvn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/09/07346512",
"title": "Non-Obtuse Remeshing with Centroidal Voronoi Tessellation",
"doi": null,
"abstractUrl": "/journal/tg/2016/09/07346512/13rRUzp02or",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/03/09804752",
"title": "Intrinsic and Isotropic Resampling for 3D Point Clouds",
"doi": null,
"abstractUrl": "/journal/tp/2023/03/09804752/1ErlhDR4iI0",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/03/09167456",
"title": "Surface Remeshing: A Systematic Literature Review of Methods and Research Directions",
"doi": null,
"abstractUrl": "/journal/tg/2022/03/09167456/1mhPPdfu11u",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09978713",
"articleId": "1IXUnnVaWoE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09978915",
"articleId": "1IXUnNBj0Yw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IXUnnVaWoE",
"doi": "10.1109/TVCG.2022.3228171",
"abstract": "Walking-in-place (WIP) is a locomotion technique that enables users to “walk infinitely” through vast virtual environments using walking-like gestures within a limited physical space. This paper investigates alternative interaction schemes for WIP, addressing successively the control, input, and output of WIP. First, we introduce a novel height-based control to increase advanced speed. Second, we introduce a novel input system for WIP based on elastic and passive strips. Third, we introduce the use of pseudo-haptic feedback as a novel output for WIP meant to alter walking sensations. The results of a series of user studies show that height and frequency based control of WIP can facilitate higher virtual speed with greater efficacy and ease than in frequency-based WIP. Second, using an upward elastic input system can result in a stable virtual speed control, although excessively strong elastic forces may impact the usability and user experience. Finally, using a pseudo-haptic approach can improve the perceived realism of virtual slopes. Taken together, our results suggest that, for future VR applications, there is value in further research into the use of alternative interaction schemes for walking-in-place.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Walking-in-place (WIP) is a locomotion technique that enables users to “walk infinitely” through vast virtual environments using walking-like gestures within a limited physical space. This paper investigates alternative interaction schemes for WIP, addressing successively the control, input, and output of WIP. First, we introduce a novel height-based control to increase advanced speed. Second, we introduce a novel input system for WIP based on elastic and passive strips. Third, we introduce the use of pseudo-haptic feedback as a novel output for WIP meant to alter walking sensations. The results of a series of user studies show that height and frequency based control of WIP can facilitate higher virtual speed with greater efficacy and ease than in frequency-based WIP. Second, using an upward elastic input system can result in a stable virtual speed control, although excessively strong elastic forces may impact the usability and user experience. Finally, using a pseudo-haptic approach can improve the perceived realism of virtual slopes. Taken together, our results suggest that, for future VR applications, there is value in further research into the use of alternative interaction schemes for walking-in-place.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Walking-in-place (WIP) is a locomotion technique that enables users to “walk infinitely” through vast virtual environments using walking-like gestures within a limited physical space. This paper investigates alternative interaction schemes for WIP, addressing successively the control, input, and output of WIP. First, we introduce a novel height-based control to increase advanced speed. Second, we introduce a novel input system for WIP based on elastic and passive strips. Third, we introduce the use of pseudo-haptic feedback as a novel output for WIP meant to alter walking sensations. The results of a series of user studies show that height and frequency based control of WIP can facilitate higher virtual speed with greater efficacy and ease than in frequency-based WIP. Second, using an upward elastic input system can result in a stable virtual speed control, although excessively strong elastic forces may impact the usability and user experience. Finally, using a pseudo-haptic approach can improve the perceived realism of virtual slopes. Taken together, our results suggest that, for future VR applications, there is value in further research into the use of alternative interaction schemes for walking-in-place.",
"title": "Revisiting Walking-in-Place by Introducing Step-Height Control, Elastic Input, and Pseudo-Haptic Feedback",
"normalizedTitle": "Revisiting Walking-in-Place by Introducing Step-Height Control, Elastic Input, and Pseudo-Haptic Feedback",
"fno": "09978713",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Haptic Interfaces",
"Frequency Control",
"Force",
"Mathematical Models",
"Foot",
"Visualization",
"Walking In Place",
"Pseudo Haptics",
"Passive Haptics",
"Elastic Input",
"Virtual Reality",
"Locomotion"
],
"authors": [
{
"givenName": "Yutaro",
"surname": "Hirao",
"fullName": "Yutaro Hirao",
"affiliation": "University of Tokyo, Tokyo, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Takuji",
"surname": "Narumi",
"fullName": "Takuji Narumi",
"affiliation": "University of Tokyo, Tokyo, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ferran",
"surname": "Argelaguet",
"fullName": "Ferran Argelaguet",
"affiliation": "Univ. Rennes, Inria, IRISA, CNRS, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anatole",
"surname": "Lécuyer",
"fullName": "Anatole Lécuyer",
"affiliation": "Univ. Rennes, Inria, IRISA, CNRS, Rennes, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2010/6237/0/05444812",
"title": "GUD WIP: Gait-Understanding-Driven Walking-In-Place",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444812/12OmNAle6ku",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550193",
"title": "Tapping-In-Place: Increasing the naturalness of immersive walking-in-place locomotion through novel gestural input",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550193/12OmNAnMuyq",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504772",
"title": "Evaluating two alternative walking in place interfaces for virtual reality gaming",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504772/12OmNCf1Dnb",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798850",
"title": "A comparison of different methods for reducing the unintended positional drift accompanying walking-in-place locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798850/12OmNvCzFbu",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404569",
"title": "Establishing the Range of Perceptually Natural Visual Walking Speeds for Virtual Walking-In-Place Locomotion",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404569/13rRUxAASTb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/03/07444168",
"title": "Comparison of Walking and Traveling-Wave Piezoelectric Motors as Actuators in Kinesthetic Haptic Devices",
"doi": null,
"abstractUrl": "/journal/th/2016/03/07444168/13rRUxDqS8t",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699289",
"title": "Walking-in-Place for VR Navigation Independent of Gaze Direction Using a Waist-Worn Inertial Measurement Unit",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699289/19F1PlWtKJa",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049680",
"title": "Assisted walking-in-place: Introducing assisted motion to walking-by-cycling in embodied virtual reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049680/1KYolEFtr6U",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798345",
"title": "Investigation of Visual Self-Representation for a Walking-in-Place Navigation System in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798345/1cJ1hpkUgHS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a498",
"title": "SHeF-WIP: Walking-in-Place based on Step Height and Frequency for Wider Range of Virtual Speed",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a498/1tnWFlvbESk",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09978718",
"articleId": "1IXUnbRdUEE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09978684",
"articleId": "1IXUnEM2oc8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |