data
dict
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xH5FXdMnoA", "doi": "10.1109/TVCG.2021.3114805", "abstract": "Problem-driven visualization work is rooted in deeply understanding the data, actors, processes, and workflows of a target domain. However, an individual&#x0027;s personality traits and cognitive abilities may also influence visualization use. Diverse user needs and abilities raise natural questions for specificity in visualization design: <italic>Could individuals from different domains exhibit performance differences when using visualizations? Are any systematic variations related to their cognitive abilities?</italic> This study bridges domain-specific perspectives on visualization design with those provided by cognition and perception. We measure variations in visualization task performance across chemistry, computer science, and education, and relate these differences to variations in spatial ability. We conducted an online study with over 60 domain experts consisting of tasks related to pie charts, isocontour plots, and 3D scatterplots, and grounded by a well-documented spatial ability test. Task performance (correctness) varied with profession across more complex visualizations (isocontour plots and scatterplots), but not pie charts, a comparatively common visualization. We found that correctness correlates with spatial ability, and the professions differ in terms of spatial ability. These results indicate that domains differ not only in the specifics of their data and tasks, but also in terms of how effectively their constituent members engage with visualizations and their cognitive traits. 
Analyzing participants&#x0027; confidence and strategy comments suggests that focusing on performance neglects important nuances, such as differing approaches to engage with even common visualizations and potential skill transference. Our findings offer a fresh perspective on discipline-specific visualization with specific recommendations to help guide visualization design that celebrates the uniqueness of the disciplines and individuals we seek to serve.", "abstracts": [ { "abstractType": "Regular", "content": "Problem-driven visualization work is rooted in deeply understanding the data, actors, processes, and workflows of a target domain. However, an individual&#x0027;s personality traits and cognitive abilities may also influence visualization use. Diverse user needs and abilities raise natural questions for specificity in visualization design: <italic>Could individuals from different domains exhibit performance differences when using visualizations? Are any systematic variations related to their cognitive abilities?</italic> This study bridges domain-specific perspectives on visualization design with those provided by cognition and perception. We measure variations in visualization task performance across chemistry, computer science, and education, and relate these differences to variations in spatial ability. We conducted an online study with over 60 domain experts consisting of tasks related to pie charts, isocontour plots, and 3D scatterplots, and grounded by a well-documented spatial ability test. Task performance (correctness) varied with profession across more complex visualizations (isocontour plots and scatterplots), but not pie charts, a comparatively common visualization. We found that correctness correlates with spatial ability, and the professions differ in terms of spatial ability. 
These results indicate that domains differ not only in the specifics of their data and tasks, but also in terms of how effectively their constituent members engage with visualizations and their cognitive traits. Analyzing participants&#x0027; confidence and strategy comments suggests that focusing on performance neglects important nuances, such as differing approaches to engage with even common visualizations and potential skill transference. Our findings offer a fresh perspective on discipline-specific visualization with specific recommendations to help guide visualization design that celebrates the uniqueness of the disciplines and individuals we seek to serve.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Problem-driven visualization work is rooted in deeply understanding the data, actors, processes, and workflows of a target domain. However, an individual's personality traits and cognitive abilities may also influence visualization use. Diverse user needs and abilities raise natural questions for specificity in visualization design: Could individuals from different domains exhibit performance differences when using visualizations? Are any systematic variations related to their cognitive abilities? This study bridges domain-specific perspectives on visualization design with those provided by cognition and perception. We measure variations in visualization task performance across chemistry, computer science, and education, and relate these differences to variations in spatial ability. We conducted an online study with over 60 domain experts consisting of tasks related to pie charts, isocontour plots, and 3D scatterplots, and grounded by a well-documented spatial ability test. Task performance (correctness) varied with profession across more complex visualizations (isocontour plots and scatterplots), but not pie charts, a comparatively common visualization. 
We found that correctness correlates with spatial ability, and the professions differ in terms of spatial ability. These results indicate that domains differ not only in the specifics of their data and tasks, but also in terms of how effectively their constituent members engage with visualizations and their cognitive traits. Analyzing participants' confidence and strategy comments suggests that focusing on performance neglects important nuances, such as differing approaches to engage with even common visualizations and potential skill transference. Our findings offer a fresh perspective on discipline-specific visualization with specific recommendations to help guide visualization design that celebrates the uniqueness of the disciplines and individuals we seek to serve.", "title": "Professional Differences: A Comparative Study of Visualization Task Performance and Spatial Ability Across Disciplines", "normalizedTitle": "Professional Differences: A Comparative Study of Visualization Task Performance and Spatial Ability Across Disciplines", "fno": "09572234", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cognition", "Computer Aided Instruction", "Data Visualisation", "Professional Aspects", "Professional Differences", "Visualization Task Performance", "Problem Driven Visualization Work", "Target Domain", "Individual", "Cognitive Abilities", "Visualization Design", "Study Bridges Domain Specific Perspectives", "Cognition", "60 Domain Experts", "Pie Charts", "Isocontour Plots", "Spatial Ability Test", "Complex Visualizations", "Comparatively Common Visualization", "Cognitive Traits", "Performance Neglects Important Nuances", "Differing Approaches", "Common Visualizations", "Discipline Specific Visualization", "Data Visualization", "Task Analysis", "Visualization", "Cognition", "Training", "Three Dimensional Displays", "Navigation", "Visualization", "Spatial Ability", "Perception", "Task Performance", "Discipline", "Domain Specific", "Empirical Evaluation" ], 
"authors": [ { "givenName": "Kyle Wm.", "surname": "Hall", "fullName": "Kyle Wm. Hall", "affiliation": "Temple University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Anthony", "surname": "Kouroupis", "fullName": "Anthony Kouroupis", "affiliation": "Ontario Tech University, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Anastasia", "surname": "Bezerianos", "fullName": "Anastasia Bezerianos", "affiliation": "LRI-Université Paris-Saclay, France", "__typename": "ArticleAuthorType" }, { "givenName": "Danielle Albers", "surname": "Szafir", "fullName": "Danielle Albers Szafir", "affiliation": "University of Colorado Boulder, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Christopher", "surname": "Collins", "fullName": "Christopher Collins", "affiliation": "Ontario Tech University, Canada", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "654-664", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/infvis/1997/8189/0/00636794", "title": "Metrics for effective information visualization", "doi": null, "abstractUrl": "/proceedings-article/infvis/1997/00636794/12OmNAtstaH", "parentPublication": { "id": "proceedings/infvis/1997/8189/0", "title": "Proceedings of VIZ '97: Visualization Conference, Information Visualization Symposium and Parallel Rendering Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660065", "title": "Understanding Visualization through Spatial Ability Differences", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660065/12OmNxwWoKu", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dvis/2014/6826/0/07160096", "title": "3D InfoVis is here to stay: Deal with it", "doi": null, "abstractUrl": "/proceedings-article/3dvis/2014/07160096/12OmNy5hReS", "parentPublication": { "id": "proceedings/3dvis/2014/6826/0", "title": "2014 IEEE VIS International Workshop on 3DVis (3DVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2014/4258/0/4258a080", "title": "A Nested Hierarchy of Localized Scatterplots", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2014/4258a080/12OmNy7h3e0", "parentPublication": { "id": "proceedings/sibgrapi/2014/4258/0", "title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532836", "title": "Understanding visualization through spatial ability differences", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532836/12OmNzl3X0g", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/1997/8189/0/00636794", "title": "Metrics for effective information visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/1997/00636794/1h0Jt1gK27m", "parentPublication": { "id": "proceedings/ieee-infovis/1997/8189/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089446", "title": "Graphical Perception for Immersive Analytics", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089446/1jIxfA3tlUk", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on 
Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222098", "title": "Embodied Navigation in Immersive Abstract Data Visualization: Is Overview+Detail or Zooming Better for 3D Scatterplots?", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222098/1nTrQ1hHyyA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09223669", "title": "Personal Augmented Reality for Information Visualization on Large Interactive Displays", "doi": null, "abstractUrl": "/journal/tg/2021/02/09223669/1nV6cy8Xk5i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552195", "title": "Perception&#x0021; Immersion&#x0021; Empowerment&#x0021; Superpowers as Inspiration for Visualization", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552195/1xic0yNxnws", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552211", "articleId": "1xic1bREyqY", "__typename": "AdjacentArticleType" }, "next": { "fno": "09555646", "articleId": "1xlw1u3Uiw8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaJVCkKCQ", "name": "ttg202201-09572234s1-tvcg-3114805-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09572234s1-tvcg-3114805-mm.zip", "extension": "zip", "size": "3.02 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xlw1u3Uiw8", "doi": "10.1109/TVCG.2021.3114959", "abstract": "Professional roles for data visualization designers are growing in popularity, and interest in relationships between the academic research and professional practice communities is gaining traction. However, despite the potential for knowledge sharing between these communities, we have little understanding of the ways in which practitioners design in real-world, professional settings. Inquiry in numerous design disciplines indicates that practitioners approach complex situations in ways that are fundamentally different from those of researchers. In this work, I take a practice-led approach to understanding visualization design practice on its own terms. Twenty data visualization practitioners were interviewed and asked about their design process, including the steps they take, how they make decisions, and the methods they use. Findings suggest that practitioners do not follow highly systematic processes, but instead rely on situated forms of knowing and acting in which they draw from precedent and use methods and principles that are determined appropriate in the moment. These findings have implications for how visualization researchers understand and engage with practitioners, and how educators approach the training of future data visualization designers.", "abstracts": [ { "abstractType": "Regular", "content": "Professional roles for data visualization designers are growing in popularity, and interest in relationships between the academic research and professional practice communities is gaining traction. 
However, despite the potential for knowledge sharing between these communities, we have little understanding of the ways in which practitioners design in real-world, professional settings. Inquiry in numerous design disciplines indicates that practitioners approach complex situations in ways that are fundamentally different from those of researchers. In this work, I take a practice-led approach to understanding visualization design practice on its own terms. Twenty data visualization practitioners were interviewed and asked about their design process, including the steps they take, how they make decisions, and the methods they use. Findings suggest that practitioners do not follow highly systematic processes, but instead rely on situated forms of knowing and acting in which they draw from precedent and use methods and principles that are determined appropriate in the moment. These findings have implications for how visualization researchers understand and engage with practitioners, and how educators approach the training of future data visualization designers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Professional roles for data visualization designers are growing in popularity, and interest in relationships between the academic research and professional practice communities is gaining traction. However, despite the potential for knowledge sharing between these communities, we have little understanding of the ways in which practitioners design in real-world, professional settings. Inquiry in numerous design disciplines indicates that practitioners approach complex situations in ways that are fundamentally different from those of researchers. In this work, I take a practice-led approach to understanding visualization design practice on its own terms. Twenty data visualization practitioners were interviewed and asked about their design process, including the steps they take, how they make decisions, and the methods they use. 
Findings suggest that practitioners do not follow highly systematic processes, but instead rely on situated forms of knowing and acting in which they draw from precedent and use methods and principles that are determined appropriate in the moment. These findings have implications for how visualization researchers understand and engage with practitioners, and how educators approach the training of future data visualization designers.", "title": "Understanding Data Visualization Design Practice", "normalizedTitle": "Understanding Data Visualization Design Practice", "fno": "09555646", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Human Computer Interaction", "Design Methodology", "Visualization", "Production", "Complexity Theory", "Scholarships", "Design Practice", "Data Visualization", "Design Methods", "Design Process", "Research Practice Relationships" ], "authors": [ { "givenName": "Paul", "surname": "Parsons", "fullName": "Paul Parsons", "affiliation": "Purdue University, United States", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "665-675", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2005/2397/0/23970070", "title": "Knowledge Visualization in Practice: Challenges for Future Corporate Communication", "doi": null, "abstractUrl": "/proceedings-article/iv/2005/23970070/12OmNrAMF2Y", "parentPublication": { "id": "proceedings/iv/2005/2397/0", "title": "Ninth International Conference on Information Visualisation (IV'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/06/mcg2012060088", "title": "Understanding Visualization by Understanding Individual Users", "doi": null, "abstractUrl": 
"/magazine/cg/2012/06/mcg2012060088/13rRUNvya3t", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122819", "title": "SnapShot: Visualization to Propel Ice Hockey Analytics", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122819/13rRUwjGoG1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ic/2015/06/mic2015060060", "title": "Natural Interaction with Visualization Systems", "doi": null, "abstractUrl": "/magazine/ic/2015/06/mic2015060060/13rRUxCRFSG", "parentPublication": { "id": "mags/ic", "title": "IEEE Internet Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2018/7202/0/720200a362", "title": "Visual Design Thinking: Understanding the Role of Knowledge Visualization in the Design Thinking Process", "doi": null, "abstractUrl": "/proceedings-article/iv/2018/720200a362/17D45X2fUEw", "parentPublication": { "id": "proceedings/iv/2018/7202/0", "title": "2018 22nd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2019/4941/0/08933762", "title": "Sociotechnical Considerations for Accessible Visualization Design", "doi": null, "abstractUrl": "/proceedings-article/vis/2019/08933762/1fTgGk6sRfa", "parentPublication": { "id": "proceedings/vis/2019/4941/0", "title": "2019 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2020/8014/0/801400a176", "title": "Design Judgment in Data Visualization Practice", "doi": null, "abstractUrl": "/proceedings-article/vis/2020/801400a176/1qROcCF5NiU", 
"parentPublication": { "id": "proceedings/vis/2020/8014/0", "title": "2020 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2020/8014/0/801400a211", "title": "Data Visualization Practitioners&#x2019; Perspectives on Chartjunk", "doi": null, "abstractUrl": "/proceedings-article/vis/2020/801400a211/1qROcw0EDja", "parentPublication": { "id": "proceedings/vis/2020/8014/0", "title": "2020 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2021/3335/0/333500a196", "title": "How Learners Sketch Data Stories", "doi": null, "abstractUrl": "/proceedings-article/vis/2021/333500a196/1yXuabwlVYs", "parentPublication": { "id": "proceedings/vis/2021/3335/0", "title": "2021 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2021/3335/0/333500a076", "title": "Fixation and Creativity in Data Visualization Design: Experiences and Perspectives of Practitioners", "doi": null, "abstractUrl": "/proceedings-article/vis/2021/333500a076/1yXuiT4GCbK", "parentPublication": { "id": "proceedings/vis/2021/3335/0", "title": "2021 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09572234", "articleId": "1xH5FXdMnoA", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552876", "articleId": "1xic1wsZtLi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaVX4F6JG", "name": "ttg202201-09555646s1-supp1-3114959.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09555646s1-supp1-3114959.pdf", "extension": "pdf", "size": "49.7 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xic1wsZtLi", "doi": "10.1109/TVCG.2021.3114811", "abstract": "Despite the ubiquity of communicative visualizations, specifying communicative intent during design is ad hoc. Whether we are selecting from a set of visualizations, commissioning someone to produce them, or creating them ourselves, an effective way of specifying intent can help guide this process. Ideally, we would have a concise and shared specification language. In previous work, we have argued that communicative intents can be viewed as a learning/assessment problem (i.e., what should the reader learn and what test should they do well on). Learning-based specification formats are linked (e.g., assessments are derived from objectives) but some may more effectively specify communicative intent. Through a large-scale experiment, we studied three specification types: learning objectives, insights, and assessments. Participants, guided by one of these specifications, rated their preferences for a set of visualization designs. Then, we evaluated the set of visualization designs to assess which specification led participants to prefer the most effective visualizations. We find that while all specification types have benefits over no-specification, each format has its own advantages. Our results show that learning objective-based specifications helped participants the most in visualization selection. We also identify situations in which specifications may be insufficient and assessments are vital.", "abstracts": [ { "abstractType": "Regular", "content": "Despite the ubiquity of communicative visualizations, specifying communicative intent during design is ad hoc. 
Whether we are selecting from a set of visualizations, commissioning someone to produce them, or creating them ourselves, an effective way of specifying intent can help guide this process. Ideally, we would have a concise and shared specification language. In previous work, we have argued that communicative intents can be viewed as a learning/assessment problem (i.e., what should the reader learn and what test should they do well on). Learning-based specification formats are linked (e.g., assessments are derived from objectives) but some may more effectively specify communicative intent. Through a large-scale experiment, we studied three specification types: learning objectives, insights, and assessments. Participants, guided by one of these specifications, rated their preferences for a set of visualization designs. Then, we evaluated the set of visualization designs to assess which specification led participants to prefer the most effective visualizations. We find that while all specification types have benefits over no-specification, each format has its own advantages. Our results show that learning objective-based specifications helped participants the most in visualization selection. We also identify situations in which specifications may be insufficient and assessments are vital.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Despite the ubiquity of communicative visualizations, specifying communicative intent during design is ad hoc. Whether we are selecting from a set of visualizations, commissioning someone to produce them, or creating them ourselves, an effective way of specifying intent can help guide this process. Ideally, we would have a concise and shared specification language. In previous work, we have argued that communicative intents can be viewed as a learning/assessment problem (i.e., what should the reader learn and what test should they do well on). 
Learning-based specification formats are linked (e.g., assessments are derived from objectives) but some may more effectively specify communicative intent. Through a large-scale experiment, we studied three specification types: learning objectives, insights, and assessments. Participants, guided by one of these specifications, rated their preferences for a set of visualization designs. Then, we evaluated the set of visualization designs to assess which specification led participants to prefer the most effective visualizations. We find that while all specification types have benefits over no-specification, each format has its own advantages. Our results show that learning objective-based specifications helped participants the most in visualization selection. We also identify situations in which specifications may be insufficient and assessments are vital.", "title": "Learning Objectives, Insights, and Assessments: How Specification Formats Impact Design", "normalizedTitle": "Learning Objectives, Insights, and Assessments: How Specification Formats Impact Design", "fno": "09552876", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Aided Instruction", "Data Visualisation", "Formal Specification", "Learning Artificial Intelligence", "Specification Languages", "Specification Formats Impact Design", "Communicative Visualizations", "Communicative Intent", "Specifying Intent", "Specification Language", "Learning Based Specification Formats", "Specification Types", "Learning Objectives", "Visualization Designs", "Effective Visualizations", "Objective Based Specifications", "Visualization Selection", "Visualization", "Task Analysis", "Data Visualization", "Stakeholders", "Usability", "Taxonomy", "Interviews", "Communicative Visualization", "Evaluation", "Visualization Specification" ], "authors": [ { "givenName": "Elsie", "surname": "Lee-Robbins", "fullName": "Elsie Lee-Robbins", "affiliation": "University of Michigan, School of Information, USA", "__typename": 
"ArticleAuthorType" }, { "givenName": "Shiqing", "surname": "He", "fullName": "Shiqing He", "affiliation": "University of Michigan, School of Information, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Eytan", "surname": "Adar", "fullName": "Eytan Adar", "affiliation": "University of Michigan, School of Information, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "676-685", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/re/2002/1465/0/14650071", "title": "On the Use of Visualization in Formal Requirements Specification", "doi": null, "abstractUrl": "/proceedings-article/re/2002/14650071/12OmNAqU4Tu", "parentPublication": { "id": "proceedings/re/2002/1465/0", "title": "Proceedings IEEE Joint International Conference on Requirements Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icst/2017/6031/0/6031a436", "title": "Generic and Effective Specification of Structural Test Objectives", "doi": null, "abstractUrl": "/proceedings-article/icst/2017/6031a436/12OmNvjgWYc", "parentPublication": { "id": "proceedings/icst/2017/6031/0", "title": "2017 IEEE International Conference on Software Testing, Verification and Validation (ICST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/re/2017/3191/0/3191a223", "title": "Usability Insights for Requirements Engineering Tools: A User Study with Practitioners in Aeronautics", "doi": null, "abstractUrl": "/proceedings-article/re/2017/3191a223/12OmNxE2n0g", "parentPublication": { "id": "proceedings/re/2017/3191/0", "title": "2017 IEEE 25th International Requirements Engineering Conference (RE)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2015/9548/0/9548a471", "title": "Augmenting Drug Discussions in General Practice", "doi": null, "abstractUrl": "/proceedings-article/ichi/2015/9548a471/12OmNyUFfKj", "parentPublication": { "id": "proceedings/ichi/2015/9548/0", "title": "2015 International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08438968", "title": "Node-Link or Adjacency Matrices: Old Question, New Insights", "doi": null, "abstractUrl": "/journal/tg/2019/10/08438968/13rRUwjoNx8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/12/07369991", "title": "The Elicitation Interview Technique: Capturing People's Experiences of Data Representations", "doi": null, "abstractUrl": "/journal/tg/2016/12/07369991/13rRUxBa5s2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/04/mcg2015040028", "title": "Characterizing Visualization Insights from Quantified Selfers' Personal Data Presentations", "doi": null, "abstractUrl": "/magazine/cg/2015/04/mcg2015040028/13rRUxCRFQl", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09905872", "title": "Affective Learning Objectives for Communicative Visualizations", "doi": null, "abstractUrl": "/journal/tg/2023/01/09905872/1H3ZV2tCxTa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222102", "title": "Communicative Visualizations as a Learning Problem", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222102/1nTr1JohElO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09417674", "title": "Nebula: A Coordinating Grammar of Graphics", "doi": null, "abstractUrl": "/journal/tg/2022/12/09417674/1taANyFFcmQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09555646", "articleId": "1xlw1u3Uiw8", "__typename": "AdjacentArticleType" }, "next": { "fno": "09555457", "articleId": "1xjR0ZcLMNW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaFuFgSYg", "name": "ttg202201-09552876s1-tvcg-3114811-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552876s1-tvcg-3114811-mm.zip", "extension": "zip", "size": "4.25 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xjR0ZcLMNW", "doi": "10.1109/TVCG.2021.3114830", "abstract": "Working with data in table form is usually considered a preparatory and tedious step in the sensemaking pipeline; a way of getting the data ready for more sophisticated visualization and analytical tools. But for many people, spreadsheets &#x2014; the quintessential table tool &#x2014; remain a critical part of their information ecosystem, allowing them to interact with their data in ways that are hidden or abstracted in more complex tools. This is particularly true for <italic>data workers</italic> <xref ref-type=\"bibr\" rid=\"ref61\">[61]</xref>, people who work with data as part of their job but do not identify as professional analysts or data scientists. We report on a qualitative study of how these workers interact with and reason about their data. Our findings show that data tables serve a broader purpose beyond data cleanup at the initial stage of a linear analytic flow: users want to see and &#x201C;get their hands on&#x201D; the underlying data throughout the analytics process, reshaping and augmenting it to support sensemaking. They reorganize, mark up, layer on levels of detail, and spawn alternatives within the context of the base data. These direct interactions and human-readable table representations form a rich and cognitively important part of building understanding of what the data mean and what they can do with it. 
We argue that interactive tables are an important visualization idiom in their own right; that the direct data interaction they afford offers a fertile design space for visual analytics; and that sense making can be enriched by more flexible human-data interaction than is currently supported in visual analytics tools.", "abstracts": [ { "abstractType": "Regular", "content": "Working with data in table form is usually considered a preparatory and tedious step in the sensemaking pipeline; a way of getting the data ready for more sophisticated visualization and analytical tools. But for many people, spreadsheets &#x2014; the quintessential table tool &#x2014; remain a critical part of their information ecosystem, allowing them to interact with their data in ways that are hidden or abstracted in more complex tools. This is particularly true for <italic>data workers</italic> <xref ref-type=\"bibr\" rid=\"ref61\">[61]</xref>, people who work with data as part of their job but do not identify as professional analysts or data scientists. We report on a qualitative study of how these workers interact with and reason about their data. Our findings show that data tables serve a broader purpose beyond data cleanup at the initial stage of a linear analytic flow: users want to see and &#x201C;get their hands on&#x201D; the underlying data throughout the analytics process, reshaping and augmenting it to support sensemaking. They reorganize, mark up, layer on levels of detail, and spawn alternatives within the context of the base data. These direct interactions and human-readable table representations form a rich and cognitively important part of building understanding of what the data mean and what they can do with it. 
We argue that interactive tables are an important visualization idiom in their own right; that the direct data interaction they afford offers a fertile design space for visual analytics; and that sense making can be enriched by more flexible human-data interaction than is currently supported in visual analytics tools.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Working with data in table form is usually considered a preparatory and tedious step in the sensemaking pipeline; a way of getting the data ready for more sophisticated visualization and analytical tools. But for many people, spreadsheets — the quintessential table tool — remain a critical part of their information ecosystem, allowing them to interact with their data in ways that are hidden or abstracted in more complex tools. This is particularly true for data workers [61], people who work with data as part of their job but do not identify as professional analysts or data scientists. We report on a qualitative study of how these workers interact with and reason about their data. Our findings show that data tables serve a broader purpose beyond data cleanup at the initial stage of a linear analytic flow: users want to see and “get their hands on” the underlying data throughout the analytics process, reshaping and augmenting it to support sensemaking. They reorganize, mark up, layer on levels of detail, and spawn alternatives within the context of the base data. These direct interactions and human-readable table representations form a rich and cognitively important part of building understanding of what the data mean and what they can do with it. 
We argue that interactive tables are an important visualization idiom in their own right; that the direct data interaction they afford offers a fertile design space for visual analytics; and that sense making can be enriched by more flexible human-data interaction than is currently supported in visual analytics tools.", "title": "Untidy Data: The Unreasonable Effectiveness of Tables", "normalizedTitle": "Untidy Data: The Unreasonable Effectiveness of Tables", "fno": "09555457", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Visualisation", "Human Computer Interaction", "Personnel", "Professional Aspects", "Untidy Data", "Unreasonable Effectiveness", "Table Form", "Preparatory Step", "Tedious Step", "Sensemaking Pipeline", "Sophisticated Visualization", "Analytical Tools", "Quintessential Table Tool", "Information Ecosystem", "Complex Tools", "Data Workers", "Professional Analysts", "Workers Interact", "Data Tables", "Data Cleanup", "Linear Analytic Flow", "Analytics Process", "Base Data", "Direct Interactions", "Human Readable Table Representations", "Rich Part", "Cognitively Important Part", "Data Mean", "Interactive Tables", "Important Visualization Idiom", "Direct Data Interaction", "Flexible Human Data Interaction", "Visual Analytics Tools", "Tools", "Data Visualization", "Visual Analytics", "Cleaning", "Annotations", "Affordances", "Organizations", "Data Practices", "Tabular Data", "Interview Study", "Visualization", "Analytics", "Data Workers", "Sensemaking" ], "authors": [ { "givenName": "Lyn", "surname": "Bartram", "fullName": "Lyn Bartram", "affiliation": "Simon Fraser University, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Correll", "fullName": "Michael Correll", "affiliation": "Tableau, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Melanie", "surname": "Tory", "fullName": "Melanie Tory", "affiliation": "Roux Institute, United States", "__typename": "ArticleAuthorType" 
} ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "686-696", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vast/2012/4752/0/06400558", "title": "SocialNetSense: Supporting sensemaking of social and structural features in networks with interactive visualization", "doi": null, "abstractUrl": "/proceedings-article/vast/2012/06400558/12OmNxdm4ya", "parentPublication": { "id": "proceedings/vast/2012/4752/0", "title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07194834", "title": "SensePath: Understanding the Sensemaking Process Through Analytic Provenance", "doi": null, "abstractUrl": "/journal/tg/2016/01/07194834/13rRUEgarnM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122908", "title": "The User Puzzle—Explaining the Interaction with Visual Analytics Systems", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122908/13rRUIIVlcH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050570", "title": "How Can Visual Analytics Assist Investigative Analysis? 
Design Implications from an Evaluation", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050570/13rRUILLkvl", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122869", "title": "Examining the Use of a Visual Analytics System for Sensemaking Tasks: Case Studies with Domain Experts", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122869/13rRUxNmPDT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2017/3163/0/08585484", "title": "CRICTO: Supporting Sensemaking through Crowdsourced Information Schematization", "doi": null, "abstractUrl": "/proceedings-article/vast/2017/08585484/17D45Wc1ILV", "parentPublication": { "id": "proceedings/vast/2017/3163/0", "title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09894094", "title": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think", "doi": null, "abstractUrl": "/journal/tg/5555/01/09894094/1GIqpC6j7na", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2021/3827/0/382700a211", "title": "Visual Analytics and Similarity Search - Interest-based Similarity Search in Scientific Data", "doi": null, "abstractUrl": "/proceedings-article/iv/2021/382700a211/1y4oJ30dpcY", "parentPublication": { "id": "proceedings/iv/2021/3827/0", "title": "2021 25th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/mlui/2021/1372/0/137200a001", "title": "DocTable: Table-Oriented Interactive Machine Learning for Text Corpora", "doi": null, "abstractUrl": "/proceedings-article/mlui/2021/137200a001/1yNhhY4pkgo", "parentPublication": { "id": "proceedings/mlui/2021/1372/0", "title": "2021 IEEE Workshop on Machine Learning from User Interactions (MLUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2021/3335/0/333500a181", "title": "Narrative Sensemaking: Strategies for Narrative Maps Construction", "doi": null, "abstractUrl": "/proceedings-article/vis/2021/333500a181/1yXuj3PJXRm", "parentPublication": { "id": "proceedings/vis/2021/3335/0", "title": "2021 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552876", "articleId": "1xic1wsZtLi", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552216", "articleId": "1xic1HOWGli", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zJiGIqQfMA", "name": "ttg202201-09555457s1-tvcg-3114830-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09555457s1-tvcg-3114830-mm.zip", "extension": "zip", "size": "18.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xic1HOWGli", "doi": "10.1109/TVCG.2021.3114780", "abstract": "People's associations between colors and concepts influence their ability to interpret the meanings of colors in information visualizations. Previous work has suggested such effects are limited to concepts that have strong, specific associations with colors. However, although a concept may not be strongly associated with any colors, its mapping can be disambiguated in the context of other concepts in an encoding system. We articulate this view in semantic discriminability theory, a general framework for understanding conditions determining when people can infer meaning from perceptual features. Semantic discriminability is the degree to which observers can infer a unique mapping between visual features and concepts. Semantic discriminability theory posits that the capacity for semantic discriminability for a set of concepts is constrained by the difference between the feature-concept association distributions across the concepts in the set. We define formal properties of this theory and test its implications in two experiments. The results show that the capacity to produce semantically discriminable colors for sets of concepts was indeed constrained by the statistical distance between color-concept association distributions (Experiment 1). Moreover, people could interpret meanings of colors in bar graphs insofar as the colors were semantically discriminable, even for concepts previously considered “non-colorable” (Experiment 2). 
The results suggest that colors are more robust for visual communication than previously thought.", "abstracts": [ { "abstractType": "Regular", "content": "People's associations between colors and concepts influence their ability to interpret the meanings of colors in information visualizations. Previous work has suggested such effects are limited to concepts that have strong, specific associations with colors. However, although a concept may not be strongly associated with any colors, its mapping can be disambiguated in the context of other concepts in an encoding system. We articulate this view in semantic discriminability theory, a general framework for understanding conditions determining when people can infer meaning from perceptual features. Semantic discriminability is the degree to which observers can infer a unique mapping between visual features and concepts. Semantic discriminability theory posits that the capacity for semantic discriminability for a set of concepts is constrained by the difference between the feature-concept association distributions across the concepts in the set. We define formal properties of this theory and test its implications in two experiments. The results show that the capacity to produce semantically discriminable colors for sets of concepts was indeed constrained by the statistical distance between color-concept association distributions (Experiment 1). Moreover, people could interpret meanings of colors in bar graphs insofar as the colors were semantically discriminable, even for concepts previously considered “non-colorable” (Experiment 2). The results suggest that colors are more robust for visual communication than previously thought.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "People's associations between colors and concepts influence their ability to interpret the meanings of colors in information visualizations. 
Previous work has suggested such effects are limited to concepts that have strong, specific associations with colors. However, although a concept may not be strongly associated with any colors, its mapping can be disambiguated in the context of other concepts in an encoding system. We articulate this view in semantic discriminability theory, a general framework for understanding conditions determining when people can infer meaning from perceptual features. Semantic discriminability is the degree to which observers can infer a unique mapping between visual features and concepts. Semantic discriminability theory posits that the capacity for semantic discriminability for a set of concepts is constrained by the difference between the feature-concept association distributions across the concepts in the set. We define formal properties of this theory and test its implications in two experiments. The results show that the capacity to produce semantically discriminable colors for sets of concepts was indeed constrained by the statistical distance between color-concept association distributions (Experiment 1). Moreover, people could interpret meanings of colors in bar graphs insofar as the colors were semantically discriminable, even for concepts previously considered “non-colorable” (Experiment 2). 
The results suggest that colors are more robust for visual communication than previously thought.", "title": "Context Matters: A Theory of Semantic Discriminability for Perceptual Encoding Systems", "normalizedTitle": "Context Matters: A Theory of Semantic Discriminability for Perceptual Encoding Systems", "fno": "09552216", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Color", "Semantics", "Visualization", "Image Color Analysis", "Encoding", "Plastics", "Visual Communication", "Visual Reasoning", "Information Visualization", "Visual Communication", "Visual Encoding", "Color Cognition" ], "authors": [ { "givenName": "Kushin", "surname": "Mukherjee", "fullName": "Kushin Mukherjee", "affiliation": "Psychology and Wisconsin Institute for Discovery University of Wisconsin_Madison, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Brian", "surname": "Yin", "fullName": "Brian Yin", "affiliation": "Cognitive Science University of California, Berkeley", "__typename": "ArticleAuthorType" }, { "givenName": "Brianne E.", "surname": "Sherman", "fullName": "Brianne E. Sherman", "affiliation": "Neurobiology and Wisconsin Institute for Discovery University of Wisconsin-Madison, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Laurent", "surname": "Lessard", "fullName": "Laurent Lessard", "affiliation": "Mechanical and Industrial Engineering Northeastern University, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Karen B.", "surname": "Schloss", "fullName": "Karen B. 
Schloss", "affiliation": "Psychology and Wisconsin Institute for Discovery University of Wisconsin-Madison, United States", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "697-706", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/dsc/2018/4210/0/421001a837", "title": "Automatic Taxonomy Construction for Eye Colors Data without Using Context Information", "doi": null, "abstractUrl": "/proceedings-article/dsc/2018/421001a837/12OmNviZlCk", "parentPublication": { "id": "proceedings/dsc/2018/4210/0", "title": "2018 IEEE Third International Conference on Data Science in Cyberspace (DSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2011/0063/0/06130335", "title": "A theory of color barcodes", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130335/12OmNyQ7FOF", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539386", "title": "Colorgorical: Creating discriminable and preferable color palettes for information visualization", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539386/13rRUxlgy3M", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nicoint/2022/6908/0/690800a021", "title": "Perceptual Control of Food Taste with Projection Mapping", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2022/690800a021/1FWmZYvi4MM", "parentPublication": { 
"id": "proceedings/nicoint/2022/6908/0", "title": "2022 Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09904484", "title": "Self-Supervised Color-Concept Association via Image Colorization", "doi": null, "abstractUrl": "/journal/tg/2023/01/09904484/1H1ggMqzJUQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09905997", "title": "Unifying Effects of Direct and Relational Associations for Visual Communication", "doi": null, "abstractUrl": "/journal/tg/2023/01/09905997/1H3ZWHY73by", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09969167", "title": "Image-Driven Harmonious Color Palette Generation for Diverse Information Visualization", "doi": null, "abstractUrl": "/journal/tg/5555/01/09969167/1IMicNIXex2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08809846", "title": "Estimating Color-Concept Associations from Image Statistics", "doi": null, "abstractUrl": "/journal/tg/2020/01/08809846/1cHEoEeTId2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800i866", "title": "Harmonizing Transferability and Discriminability for Adapting Object Detectors", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800i866/1m3oc7dAkbS", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 
IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09239918", "title": "Semantic Discriminability for Visual Communication", "doi": null, "abstractUrl": "/journal/tg/2021/02/09239918/1oeZWSkMqre", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09555457", "articleId": "1xjR0ZcLMNW", "__typename": "AdjacentArticleType" }, "next": { "fno": "09557878", "articleId": "1xquNQMVFCM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaFJNYm7C", "name": "ttg202201-09552216s1-supp1-3114780.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552216s1-supp1-3114780.pdf", "extension": "pdf", "size": "458 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xquNQMVFCM", "doi": "10.1109/TVCG.2021.3114684", "abstract": "Data can be visually represented using visual channels like position, length or luminance. An existing ranking of these visual channels is based on how accurately participants could report the ratio between two depicted values. There is an assumption that this ranking should hold for different tasks and for different numbers of marks. However, there is surprisingly little existing work that tests this assumption, especially given that visually computing ratios is relatively unimportant in real-world visualizations, compared to seeing, remembering, and comparing trends and motifs, across displays that almost universally depict more than two values. To simulate the information extracted from a glance at a visualization, we instead asked participants to immediately reproduce a set of values from memory after they were shown the visualization. These values could be shown in a bar graph (position (bar)), line graph (position (line)), heat map (luminance), bubble chart (area), misaligned bar graph (length), or &#x2018;wind map&#x2019; (angle). With a Bayesian multilevel modeling approach, we show how the rank positions of visual channels shift across different numbers of marks (2, 4 or 8) and for bias, precision, and error measures. The ranking did not hold, even for reproductions of only 2 marks, and the new probabilistic ranking was highly inconsistent for reproductions of different numbers of marks. 
Other factors besides channel choice had an order of magnitude more influence on performance, such as the number of values in the series (e.g., more marks led to larger errors), or the value of each mark (e.g., small values were systematically overestimated). Every visual channel was worse for displays with 8 marks than 4, consistent with established limits on visual memory. These results point to the need for a body of empirical studies that move beyond two-value ratio judgments as a baseline for reliably ranking the quality of a visual channel, including testing new tasks (detection of trends or motifs), timescales (immediate computation, or later comparison), and the number of values (from a handful, to thousands).", "abstracts": [ { "abstractType": "Regular", "content": "Data can be visually represented using visual channels like position, length or luminance. An existing ranking of these visual channels is based on how accurately participants could report the ratio between two depicted values. There is an assumption that this ranking should hold for different tasks and for different numbers of marks. However, there is surprisingly little existing work that tests this assumption, especially given that visually computing ratios is relatively unimportant in real-world visualizations, compared to seeing, remembering, and comparing trends and motifs, across displays that almost universally depict more than two values. To simulate the information extracted from a glance at a visualization, we instead asked participants to immediately reproduce a set of values from memory after they were shown the visualization. These values could be shown in a bar graph (position (bar)), line graph (position (line)), heat map (luminance), bubble chart (area), misaligned bar graph (length), or &#x2018;wind map&#x2019; (angle). 
With a Bayesian multilevel modeling approach, we show how the rank positions of visual channels shift across different numbers of marks (2, 4 or 8) and for bias, precision, and error measures. The ranking did not hold, even for reproductions of only 2 marks, and the new probabilistic ranking was highly inconsistent for reproductions of different numbers of marks. Other factors besides channel choice had an order of magnitude more influence on performance, such as the number of values in the series (e.g., more marks led to larger errors), or the value of each mark (e.g., small values were systematically overestimated). Every visual channel was worse for displays with 8 marks than 4, consistent with established limits on visual memory. These results point to the need for a body of empirical studies that move beyond two-value ratio judgments as a baseline for reliably ranking the quality of a visual channel, including testing new tasks (detection of trends or motifs), timescales (immediate computation, or later comparison), and the number of values (from a handful, to thousands).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Data can be visually represented using visual channels like position, length or luminance. An existing ranking of these visual channels is based on how accurately participants could report the ratio between two depicted values. There is an assumption that this ranking should hold for different tasks and for different numbers of marks. However, there is surprisingly little existing work that tests this assumption, especially given that visually computing ratios is relatively unimportant in real-world visualizations, compared to seeing, remembering, and comparing trends and motifs, across displays that almost universally depict more than two values. 
To simulate the information extracted from a glance at a visualization, we instead asked participants to immediately reproduce a set of values from memory after they were shown the visualization. These values could be shown in a bar graph (position (bar)), line graph (position (line)), heat map (luminance), bubble chart (area), misaligned bar graph (length), or ‘wind map’ (angle). With a Bayesian multilevel modeling approach, we show how the rank positions of visual channels shift across different numbers of marks (2, 4 or 8) and for bias, precision, and error measures. The ranking did not hold, even for reproductions of only 2 marks, and the new probabilistic ranking was highly inconsistent for reproductions of different numbers of marks. Other factors besides channel choice had an order of magnitude more influence on performance, such as the number of values in the series (e.g., more marks led to larger errors), or the value of each mark (e.g., small values were systematically overestimated). Every visual channel was worse for displays with 8 marks than 4, consistent with established limits on visual memory. These results point to the need for a body of empirical studies that move beyond two-value ratio judgments as a baseline for reliably ranking the quality of a visual channel, including testing new tasks (detection of trends or motifs), timescales (immediate computation, or later comparison), and the number of values (from a handful, to thousands).", "title": "Rethinking the Ranks of Visual Channels", "normalizedTitle": "Rethinking the Ranks of Visual Channels", "fno": "09557878", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Task Analysis", "Visualization", "Bars", "Data Visualization", "Memory Management", "Measurement Uncertainty", "Correlation", "Data Type Agnostic", "Human Subjects Quantitative Studies", "Perception Cognition", "Charts Diagrams And Plots" ], "authors": [ { "givenName": "Caitlyn M.", "surname": "McColeman", "fullName": "Caitlyn M. 
McColeman", "affiliation": "Northwestern University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Fumeng", "surname": "Yang", "fullName": "Fumeng Yang", "affiliation": "Brown University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Timothy F.", "surname": "Brady", "fullName": "Timothy F. Brady", "affiliation": "University of San Diego, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Steven", "surname": "Franconeri", "fullName": "Steven Franconeri", "affiliation": "Northwestern University, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "707-717", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2016/01/07192667", "title": "Visual Encodings of Temporal Uncertainty: A Comparative User Study", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192667/13rRUwjGoLH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122631", "title": "Graphical Overlays: Using Layered Elements to Aid Chart Reading", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122631/13rRUyfKIHJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09904487", "title": "Studying Early Decision Making with Progressive Bar Charts", "doi": null, "abstractUrl": "/journal/tg/2023/01/09904487/1H1geE4olvG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08805448", "title": "Illusion of Causality in Visualized Data", "doi": null, "abstractUrl": "/journal/tg/2020/01/08805448/1cG4Az22lFe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807320", "title": "The Perceptual Proxies of Visual Comparison", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807320/1cG6vb0dTG0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08836120", "title": "Measures of the Benefit of Direct Encoding of Data Deltas for Data Pair Relation Perception", "doi": null, "abstractUrl": "/journal/tg/2020/01/08836120/1dia2KVa7g4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222047", "title": "Truth or Square: Aspect Ratio Biases Recall of Position Encodings", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222047/1nTqj3fbFXq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09238508", "title": "Revealing Perceptual Proxies with Adversarial Examples", "doi": null, "abstractUrl": "/journal/tg/2021/02/09238508/1oa15KNUtGg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09288884", "title": "No mark is an island: Precision and category repulsion biases in data 
reproductions", "doi": null, "abstractUrl": "/journal/tg/2021/02/09288884/1pq6f5VhVF6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a344", "title": "Comparison of four visual analytics techniques for the visualization of adverse drug event rates in clinical trials", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a344/1rSRc4omAj6", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552216", "articleId": "1xic1HOWGli", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552881", "articleId": "1xibXzMLm9i", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zJiGlHefQc", "name": "ttg202201-09557878s1-supp1-3114684.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09557878s1-supp1-3114684.pdf", "extension": "pdf", "size": "1.79 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xibXzMLm9i", "doi": "10.1109/TVCG.2021.3114874", "abstract": "One of the fundamental tasks in visualization is to compare two or more visual elements. However, it is often difficult to visually differentiate graphical elements encoding a small difference in value, such as the heights of similar bars in bar chart or angles of similar sections in pie chart. Perceptual laws can be used in order to model when and how we perceive this difference. In this work, we model the perception of Just Noticeable Differences (JNDs), the minimum difference in visual attributes that allow faithfully comparing similar elements, in charts. Specifically, we explore the relation between JNDs and two major visual variables: the intensity of visual elements and the distance between them, and study it in three charts: bar chart, pie chart and bubble chart. Through an empirical study, we identify main effects on JND for distance in bar charts, intensity in pie charts, and both distance and intensity in bubble charts. By fitting a linear mixed effects model, we model JND and find that JND grows as the exponential function of variables. We highlight several usage scenarios that make use of the JND modeling in which elements below the fitted JND are detected and enhanced with secondary visual cues for better discrimination.", "abstracts": [ { "abstractType": "Regular", "content": "One of the fundamental tasks in visualization is to compare two or more visual elements. However, it is often difficult to visually differentiate graphical elements encoding a small difference in value, such as the heights of similar bars in bar chart or angles of similar sections in pie chart. 
Perceptual laws can be used in order to model when and how we perceive this difference. In this work, we model the perception of Just Noticeable Differences (JNDs), the minimum difference in visual attributes that allow faithfully comparing similar elements, in charts. Specifically, we explore the relation between JNDs and two major visual variables: the intensity of visual elements and the distance between them, and study it in three charts: bar chart, pie chart and bubble chart. Through an empirical study, we identify main effects on JND for distance in bar charts, intensity in pie charts, and both distance and intensity in bubble charts. By fitting a linear mixed effects model, we model JND and find that JND grows as the exponential function of variables. We highlight several usage scenarios that make use of the JND modeling in which elements below the fitted JND are detected and enhanced with secondary visual cues for better discrimination.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "One of the fundamental tasks in visualization is to compare two or more visual elements. However, it is often difficult to visually differentiate graphical elements encoding a small difference in value, such as the heights of similar bars in bar chart or angles of similar sections in pie chart. Perceptual laws can be used in order to model when and how we perceive this difference. In this work, we model the perception of Just Noticeable Differences (JNDs), the minimum difference in visual attributes that allow faithfully comparing similar elements, in charts. Specifically, we explore the relation between JNDs and two major visual variables: the intensity of visual elements and the distance between them, and study it in three charts: bar chart, pie chart and bubble chart. Through an empirical study, we identify main effects on JND for distance in bar charts, intensity in pie charts, and both distance and intensity in bubble charts. 
By fitting a linear mixed effects model, we model JND and find that JND grows as the exponential function of variables. We highlight several usage scenarios that make use of the JND modeling in which elements below the fitted JND are detected and enhanced with secondary visual cues for better discrimination.", "title": "Modeling Just Noticeable Differences in Charts", "normalizedTitle": "Modeling Just Noticeable Differences in Charts", "fno": "09552881", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Bars", "Task Analysis", "Fans", "Correlation", "Computational Modeling", "Three Dimensional Displays", "Visual Perception", "Charts", "Just Noticeable Difference", "Modeling" ], "authors": [ { "givenName": "Min", "surname": "Lu", "fullName": "Min Lu", "affiliation": "Shenzhen University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Joel", "surname": "Lanir", "fullName": "Joel Lanir", "affiliation": "The University of Haifa, Israel", "__typename": "ArticleAuthorType" }, { "givenName": "Chufeng", "surname": "Wang", "fullName": "Chufeng Wang", "affiliation": "Shenzhen University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yucong", "surname": "Yao", "fullName": "Yucong Yao", "affiliation": "Shenzhen University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Wen", "surname": "Zhang", "fullName": "Wen Zhang", "affiliation": "Shenzhen University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Oliver", "surname": "Deussen", "fullName": "Oliver Deussen", "affiliation": "University of Konstanz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Hui", "surname": "Huang", "fullName": "Hui Huang", "affiliation": "Shenzhen University, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "718-726", "year": "2022", "issn": 
"1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/fie/2014/3922/0/07043983", "title": "Your data deserve better than pies and bars: An R graphics workshop for the timid", "doi": null, "abstractUrl": "/proceedings-article/fie/2014/07043983/12OmNz3bdDC", "parentPublication": { "id": "proceedings/fie/2014/3922/0", "title": "2014 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/03/07845717", "title": "Converting Basic D3 Charts into Reusable Style Templates", "doi": null, "abstractUrl": "/journal/tg/2018/03/07845717/13rRUxYINfm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09904487", "title": "Studying Early Decision Making with Progressive Bar Charts", "doi": null, "abstractUrl": "/journal/tg/2023/01/09904487/1H1geE4olvG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2022/9007/0/900700a067", "title": "An Overview of the Design and Development for Dynamic and Physical Bar Charts", "doi": null, "abstractUrl": "/proceedings-article/iv/2022/900700a067/1KaH61BvDWw", "parentPublication": { "id": "proceedings/iv/2022/9007/0", "title": "2022 26th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807238", "title": "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807238/1cG66qf6MKs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & 
Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a151", "title": "The Cost of Pie Charts", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a151/1cMFcqwGM5q", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2019/4941/0/08933547", "title": "Evidence for Area as the Primary Visual Cue in Pie Charts", "doi": null, "abstractUrl": "/proceedings-article/vis/2019/08933547/1fTgFhkepQk", "parentPublication": { "id": "proceedings/vis/2019/4941/0", "title": "2019 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090530", "title": "A Just Noticeable Difference for Perceiving Virtual Surfaces through Haptic Interaction", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090530/1jIxtOsYn16", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552208", "title": "Visual Arrangements of Bar Charts Influence Comparisons in Viewer Takeaways", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552208/1xibWU97C8w", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09585700", "title": "A Mixed-Initiative Approach to Reusing Infographic Charts", "doi": null, "abstractUrl": "/journal/tg/2022/01/09585700/1y11cGSPuPC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & 
Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09557878", "articleId": "1xquNQMVFCM", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552227", "articleId": "1xibX4wTR8Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xibX4wTR8Q", "doi": "10.1109/TVCG.2021.3114693", "abstract": "In this paper, we report on a study of visual representations for cyclical data and the effect of interactively <italic>wrapping</italic> a bar chart &#x2018;around its boundaries&#x2019;. Compared to linear bar chart, polar (or radial) visualisations have the advantage that cyclical data can be presented continuously without mentally bridging the visual &#x2018;cut&#x2019; across the left-and-right boundaries. To investigate this hypothesis and to assess the effect the cut has on analysis performance, this paper presents results from a crowdsourced, controlled experiment with 72 participants comparing new continuous panning technique to linear bar charts (<italic>interactive wrapping</italic>). Our results show that bar charts with interactive wrapping lead to less errors compared to standard bar charts or polar charts. Inspired by these results, we generalise the concept of interactive wrapping to other visualisations for cyclical or relational data. We describe a design space based on the concept of one-dimensional wrapping and two-dimensional wrapping, linked to two common 3D topologies; cylinder and torus that can be used to metaphorically explain one- and two-dimensional wrapping. This design space suggests that interactive wrapping is widely applicable to many different data types.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we report on a study of visual representations for cyclical data and the effect of interactively <italic>wrapping</italic> a bar chart &#x2018;around its boundaries&#x2019;. 
Compared to linear bar chart, polar (or radial) visualisations have the advantage that cyclical data can be presented continuously without mentally bridging the visual &#x2018;cut&#x2019; across the left-and-right boundaries. To investigate this hypothesis and to assess the effect the cut has on analysis performance, this paper presents results from a crowdsourced, controlled experiment with 72 participants comparing new continuous panning technique to linear bar charts (<italic>interactive wrapping</italic>). Our results show that bar charts with interactive wrapping lead to less errors compared to standard bar charts or polar charts. Inspired by these results, we generalise the concept of interactive wrapping to other visualisations for cyclical or relational data. We describe a design space based on the concept of one-dimensional wrapping and two-dimensional wrapping, linked to two common 3D topologies; cylinder and torus that can be used to metaphorically explain one- and two-dimensional wrapping. This design space suggests that interactive wrapping is widely applicable to many different data types.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we report on a study of visual representations for cyclical data and the effect of interactively wrapping a bar chart ‘around its boundaries’. Compared to linear bar chart, polar (or radial) visualisations have the advantage that cyclical data can be presented continuously without mentally bridging the visual ‘cut’ across the left-and-right boundaries. To investigate this hypothesis and to assess the effect the cut has on analysis performance, this paper presents results from a crowdsourced, controlled experiment with 72 participants comparing new continuous panning technique to linear bar charts (interactive wrapping). Our results show that bar charts with interactive wrapping lead to less errors compared to standard bar charts or polar charts. 
Inspired by these results, we generalise the concept of interactive wrapping to other visualisations for cyclical or relational data. We describe a design space based on the concept of one-dimensional wrapping and two-dimensional wrapping, linked to two common 3D topologies; cylinder and torus that can be used to metaphorically explain one- and two-dimensional wrapping. This design space suggests that interactive wrapping is widely applicable to many different data types.", "title": "Rotate or Wrap? Interactive Visualisations of Cyclical Data on Cylindrical or Toroidal Topologies", "normalizedTitle": "Rotate or Wrap? Interactive Visualisations of Cyclical Data on Cylindrical or Toroidal Topologies", "fno": "09552227", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Bars", "Data Visualization", "Wrapping", "Task Analysis", "Topology", "Time Series Analysis", "Market Research", "Cyclic Temporal Data", "Cylindrical Topologies", "Toroidal Topologies", "Interaction Techniques", "Bar Charts", "Polar Charts", "Crowdsourced Experiment" ], "authors": [ { "givenName": "Kun-Ting", "surname": "Chen", "fullName": "Kun-Ting Chen", "affiliation": "Monash University, Australia", "__typename": "ArticleAuthorType" }, { "givenName": "Tim", "surname": "Dwyer", "fullName": "Tim Dwyer", "affiliation": "Monash University, Australia", "__typename": "ArticleAuthorType" }, { "givenName": "Benjamin", "surname": "Bach", "fullName": "Benjamin Bach", "affiliation": "University of Edinburgh, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Kim", "surname": "Marriott", "fullName": "Kim Marriott", "affiliation": "Monash University, Australia", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "727-736", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": 
"ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2018/6420/0/642000f648", "title": "DVQA: Understanding Data Visualizations via Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000f648/17D45WZZ7EU", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08443125", "title": "Glanceable Visualization: Studies of Data Comparison Performance on Smartwatches", "doi": null, "abstractUrl": "/journal/tg/2019/01/08443125/17D45XDIXRv", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09904487", "title": "Studying Early Decision Making with Progressive Bar Charts", "doi": null, "abstractUrl": "/journal/tg/2023/01/09904487/1H1geE4olvG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09904433", "title": "Evaluating the Use of Uncertainty Visualisations for Imputations of Data Missing At Random in Scatterplots", "doi": null, "abstractUrl": "/journal/tg/2023/01/09904433/1H1gkkbe0hy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2022/9007/0/900700a073", "title": "A Flexible Pipeline to Create Different Types of Data Physicalizations", "doi": null, "abstractUrl": "/proceedings-article/iv/2022/900700a073/1KaH4gYfBzG", "parentPublication": { "id": "proceedings/iv/2022/9007/0", "title": "2022 26th International Conference Information Visualisation 
(IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807238", "title": "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807238/1cG66qf6MKs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a163", "title": "Proposal and Evaluation of Textual Description Templates for Bar Charts Vocalization", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a163/1cMFc4aDtWo", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a151", "title": "The Cost of Pie Charts", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a151/1cMFcqwGM5q", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222047", "title": "Truth or Square: Aspect Ratio Biases Recall of Position Encodings", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222047/1nTqj3fbFXq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552881", "title": "Modeling Just Noticeable Differences in Charts", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552881", "articleId": "1xibXzMLm9i", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552893", "articleId": "1xic1S53KPS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaq4GfZAI", "name": "ttg202201-09552227s1-tvcg-3114693-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552227s1-tvcg-3114693-mm.zip", "extension": "zip", "size": "11.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xic1S53KPS", "doi": "10.1109/TVCG.2021.3114796", "abstract": "Interactive visualization design and research have primarily focused on local data and synchronous events. However, for more complex use cases-e.g., remote database access and streaming data sources-developers must grapple with distributed data and asynchronous events. Currently, constructing these use cases is difficult and time-consuming; developers are forced to operationally program low-level details like asynchronous database querying and reactive event handling. This approach is in stark contrast to modern methods for browser-based interactive visualization, which feature high-level declarative specifications. In response, we present DIEL, a declarative framework that supports asynchronous events over distributed data. As in many declarative languages, DIEL developers specify only what data they want, rather than procedural steps for how to assemble it. Uniquely, DIEL models asynchronous events (e.g., user interactions, server responses) as streams of data that are captured in event logs. To specify the state of a visualization at any time, developers write declarative queries over the data and event logs; DIEL compiles and optimizes a corresponding dataflow graph, and automatically generates necessary low-level distributed systems details. We demonstrate DIEL&#x0027;S performance and expressivity through example interactive visualizations that make diverse use of remote data and asynchronous events. 
We further evaluate DIEL&#x0027;S usability using the Cognitive Dimensions of Notations framework, revealing wins such as ease of change, and compromises such as premature commitments.", "abstracts": [ { "abstractType": "Regular", "content": "Interactive visualization design and research have primarily focused on local data and synchronous events. However, for more complex use cases-e.g., remote database access and streaming data sources-developers must grapple with distributed data and asynchronous events. Currently, constructing these use cases is difficult and time-consuming; developers are forced to operationally program low-level details like asynchronous database querying and reactive event handling. This approach is in stark contrast to modern methods for browser-based interactive visualization, which feature high-level declarative specifications. In response, we present DIEL, a declarative framework that supports asynchronous events over distributed data. As in many declarative languages, DIEL developers specify only what data they want, rather than procedural steps for how to assemble it. Uniquely, DIEL models asynchronous events (e.g., user interactions, server responses) as streams of data that are captured in event logs. To specify the state of a visualization at any time, developers write declarative queries over the data and event logs; DIEL compiles and optimizes a corresponding dataflow graph, and automatically generates necessary low-level distributed systems details. We demonstrate DIEL&#x0027;S performance and expressivity through example interactive visualizations that make diverse use of remote data and asynchronous events. 
We further evaluate DIEL&#x0027;S usability using the Cognitive Dimensions of Notations framework, revealing wins such as ease of change, and compromises such as premature commitments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Interactive visualization design and research have primarily focused on local data and synchronous events. However, for more complex use cases-e.g., remote database access and streaming data sources-developers must grapple with distributed data and asynchronous events. Currently, constructing these use cases is difficult and time-consuming; developers are forced to operationally program low-level details like asynchronous database querying and reactive event handling. This approach is in stark contrast to modern methods for browser-based interactive visualization, which feature high-level declarative specifications. In response, we present DIEL, a declarative framework that supports asynchronous events over distributed data. As in many declarative languages, DIEL developers specify only what data they want, rather than procedural steps for how to assemble it. Uniquely, DIEL models asynchronous events (e.g., user interactions, server responses) as streams of data that are captured in event logs. To specify the state of a visualization at any time, developers write declarative queries over the data and event logs; DIEL compiles and optimizes a corresponding dataflow graph, and automatically generates necessary low-level distributed systems details. We demonstrate DIEL'S performance and expressivity through example interactive visualizations that make diverse use of remote data and asynchronous events. 
We further evaluate DIEL'S usability using the Cognitive Dimensions of Notations framework, revealing wins such as ease of change, and compromises such as premature commitments.", "title": "DIEL: Interactive Visualization Beyond the Here and Now", "normalizedTitle": "DIEL: Interactive Visualization Beyond the Here and Now", "fno": "09552893", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Distributed Databases", "Databases", "Programming", "Libraries", "Visual Databases", "Servers", "Interactive Visualization Toolkit Library", "Scalability", "Asynchrony" ], "authors": [ { "givenName": "Yifan", "surname": "Wu", "fullName": "Yifan Wu", "affiliation": "University of California, Berkeley, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Remco", "surname": "Chang", "fullName": "Remco Chang", "affiliation": "Tufts University, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Joseph M.", "surname": "Hellerstein", "fullName": "Joseph M. 
Hellerstein", "affiliation": "University of California, Berkeley, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Arvind", "surname": "Satyanarayan", "fullName": "Arvind Satyanarayan", "affiliation": "MIT CSAIL, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Eugene", "surname": "Wu", "fullName": "Eugene Wu", "affiliation": "Columbia University, United States", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "737-746", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2008/1966/0/04475450", "title": "Interactive Visualization - Beyond Standard Techniques for Irrelevant Datasets", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2008/04475450/12OmNBpVQ3r", "parentPublication": { "id": "proceedings/pacificvis/2008/1966/0", "title": "IEEE Pacific Visualization Symposium 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mass/1993/3460/0/00289779", "title": "Now that the storage train's here, can we get everybody on board?", "doi": null, "abstractUrl": "/proceedings-article/mass/1993/00289779/12OmNwdtwkl", "parentPublication": { "id": "proceedings/mass/1993/3460/0", "title": "Proceedings of 12th IEEE Symposium on Mass Storage Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vahc/2017/3187/0/08387496", "title": "DataScope: Interactive visual exploratory dashboards for large multidimensional data", "doi": null, "abstractUrl": "/proceedings-article/vahc/2017/08387496/12OmNx4Q6FM", "parentPublication": { "id": "proceedings/vahc/2017/3187/0", "title": "2017 IEEE Workshop on Visual Analytics in Healthcare (VAHC)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/seaa/2010/7901/0/05598096", "title": "Automated Deployment of a Heterogeneous Service-Oriented System", "doi": null, "abstractUrl": "/proceedings-article/seaa/2010/05598096/12OmNzcPA2B", "parentPublication": { "id": "proceedings/seaa/2010/7901/0", "title": "2010 36th EUROMICRO Conference on Software Engineering and Advanced Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2016/08/07457691", "title": "Interactive Visualization of Large Data Sets", "doi": null, "abstractUrl": "/journal/tk/2016/08/07457691/13rRUwfZC0E", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192704", "title": "Reactive Vega: A Streaming Dataflow Architecture for Declarative Interactive Visualization", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192704/13rRUx0gev9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cloud/2018/7235/0/723501a326", "title": "Beyond Generic Lifecycles: Reusable Modeling of Custom-Fit Management Workflows for Cloud Applications", "doi": null, "abstractUrl": "/proceedings-article/cloud/2018/723501a326/13xI8AcUWLW", "parentPublication": { "id": "proceedings/cloud/2018/7235/0", "title": "2018 IEEE 11th International Conference on Cloud Computing (CLOUD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2019/8350/0/08718166", "title": "Distributed Key-Value Storage for Edge Computing and its Explicit Data Distribution Method", "doi": null, "abstractUrl": "/proceedings-article/icoin/2019/08718166/1aIS30FVkWs", "parentPublication": { 
"id": "proceedings/icoin/2019/8350/0", "title": "2019 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08809730", "title": "P5: Portable Progressive Parallel Processing Pipelines for Interactive Data Analysis and Visualization", "doi": null, "abstractUrl": "/journal/tg/2020/01/08809730/1cHE2tYwF7a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09246282", "title": "P6: A Declarative Language for Integrating Machine Learning in Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2021/02/09246282/1olDLxl43Qc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552227", "articleId": "1xibX4wTR8Q", "__typename": "AdjacentArticleType" }, "next": { "fno": "09555620", "articleId": "1xlvYtlxuKY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaGqWqVFu", "name": "ttg202201-09552893s1-supp1-3114796.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552893s1-supp1-3114796.pdf", "extension": "pdf", "size": "262 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xlvYtlxuKY", "doi": "10.1109/TVCG.2021.3114841", "abstract": "Visualization collections, accessed by platforms such as Tableau Online or Power Bl, are used by millions of people to share and access diverse analytical knowledge in the form of interactive visualization bundles. Result snippets, compact previews of these bundles, are presented to users to help them identify relevant content when browsing collections. Our engagement with Tableau product teams and review of existing snippet designs on five platforms showed us that current practices fail to help people judge the relevance of bundles because they include only the title and one image. Users frequently need to undertake the time-consuming endeavour of opening a bundle within its visualization system to examine its many views and dashboards. In response, we contribute the first systematic approach to visualization snippet design. We propose a framework for snippet design that addresses eight key challenges that we identify. We present a computational pipeline to compress the visual and textual content of bundles into representative previews that is adaptive to a provided pixel budget and provides high information density with multiple images and carefully chosen keywords. We also reflect on the method of visual inspection through random sampling to gain confidence in model and parameter choices.", "abstracts": [ { "abstractType": "Regular", "content": "Visualization collections, accessed by platforms such as Tableau Online or Power Bl, are used by millions of people to share and access diverse analytical knowledge in the form of interactive visualization bundles. 
Result snippets, compact previews of these bundles, are presented to users to help them identify relevant content when browsing collections. Our engagement with Tableau product teams and review of existing snippet designs on five platforms showed us that current practices fail to help people judge the relevance of bundles because they include only the title and one image. Users frequently need to undertake the time-consuming endeavour of opening a bundle within its visualization system to examine its many views and dashboards. In response, we contribute the first systematic approach to visualization snippet design. We propose a framework for snippet design that addresses eight key challenges that we identify. We present a computational pipeline to compress the visual and textual content of bundles into representative previews that is adaptive to a provided pixel budget and provides high information density with multiple images and carefully chosen keywords. We also reflect on the method of visual inspection through random sampling to gain confidence in model and parameter choices.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visualization collections, accessed by platforms such as Tableau Online or Power Bl, are used by millions of people to share and access diverse analytical knowledge in the form of interactive visualization bundles. Result snippets, compact previews of these bundles, are presented to users to help them identify relevant content when browsing collections. Our engagement with Tableau product teams and review of existing snippet designs on five platforms showed us that current practices fail to help people judge the relevance of bundles because they include only the title and one image. Users frequently need to undertake the time-consuming endeavour of opening a bundle within its visualization system to examine its many views and dashboards. 
In response, we contribute the first systematic approach to visualization snippet design. We propose a framework for snippet design that addresses eight key challenges that we identify. We present a computational pipeline to compress the visual and textual content of bundles into representative previews that is adaptive to a provided pixel budget and provides high information density with multiple images and carefully chosen keywords. We also reflect on the method of visual inspection through random sampling to gain confidence in model and parameter choices.", "title": "VizSnippets: Compressing Visualization Bundles Into Representative Previews for Browsing Visualization Collections", "normalizedTitle": "VizSnippets: Compressing Visualization Bundles Into Representative Previews for Browsing Visualization Collections", "fno": "09555620", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Visualization", "Tools", "Pipelines", "Layout", "Inspection", "Image Coding", "Visualization Collections", "Visualization Bundles", "Result Snippets", "Visual Inspection" ], "authors": [ { "givenName": "Michael", "surname": "Oppermann", "fullName": "Michael Oppermann", "affiliation": "University of British Columbia, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Tamara", "surname": "Munzner", "fullName": "Tamara Munzner", "affiliation": "University of British Columbia, Canada", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "747-757", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2014/4103/0/4103a001", "title": "Using Visual Cues on DOITree for Visualizing Large Hierarchical Data", "doi": null, "abstractUrl": "/proceedings-article/iv/2014/4103a001/12OmNBJNL1S", 
"parentPublication": { "id": "proceedings/iv/2014/4103/0", "title": "2014 18th International Conference on Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156377", "title": "MetaTracts - A method for robust extraction and visualization of carbon fiber bundles in fiber reinforced composites", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156377/12OmNrYCXXM", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/apvis/2007/0808/0/04126231", "title": "Level-of-detail visualization of clustered graph layouts", "doi": null, "abstractUrl": "/proceedings-article/apvis/2007/04126231/12OmNzYeARq", "parentPublication": { "id": "proceedings/apvis/2007/0808/0", "title": "Asia-Pacific Symposium on Visualisation 2007", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/03/ttg2014030457", "title": "Similarity Preserving Snippet-Based Visualization of Web Search Results", "doi": null, "abstractUrl": "/journal/tg/2014/03/ttg2014030457/13rRUIIVlcJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/08/06636295", "title": "Bundled Visualization of DynamicGraph and Trail Data", "doi": null, "abstractUrl": "/journal/tg/2014/08/06636295/13rRUwd9CG3", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/06/ttg2008061189", "title": "Graphical Histories for Visualization: Supporting Analysis, Communication, and 
Evaluation", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061189/13rRUwwJWFI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/08/06025348", "title": "Hierarchical Streamline Bundles", "doi": null, "abstractUrl": "/journal/tg/2012/08/06025348/13rRUyY28Yt", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2018/7202/0/720200a368", "title": "Rule-Based Visualization of Tableau Calculus for Propositional Logic", "doi": null, "abstractUrl": "/proceedings-article/iv/2018/720200a368/17D45XDIXPk", "parentPublication": { "id": "proceedings/iv/2018/7202/0", "title": "2018 22nd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09984953", "title": "VISAtlas: An Image-based Exploration and Query System for Large Visualization Collections via Neural Image Embedding", "doi": null, "abstractUrl": "/journal/tg/5555/01/09984953/1J6d2SwfUT6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2020/8014/0/801400a181", "title": "What are Data Insights to Professional Visualization Users?", "doi": null, "abstractUrl": "/proceedings-article/vis/2020/801400a181/1qRO9q8X6dq", "parentPublication": { "id": "proceedings/vis/2020/8014/0", "title": "2020 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552893", "articleId": "1xic1S53KPS", "__typename": "AdjacentArticleType" }, 
"next": { "fno": "09555244", "articleId": "1xjR1QZtkTS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBawzPLtxS", "name": "ttg202201-09555620s1-supp1-3114841.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09555620s1-supp1-3114841.mp4", "extension": "mp4", "size": "17.6 MB", "__typename": "WebExtraType" }, { "id": "1zBawt3WKas", "name": "ttg202201-09555620s1-supp2-3114841.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09555620s1-supp2-3114841.pdf", "extension": "pdf", "size": "10.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xjR1QZtkTS", "doi": "10.1109/TVCG.2021.3114807", "abstract": "Finding the similarities and differences between groups of datasets is a fundamental analysis task. For high-dimensional data, dimensionality reduction (DR) methods are often used to find the characteristics of each group. However, existing DR methods provide limited capability and flexibility for such comparative analysis as each method is designed only for a narrow analysis target, such as identifying factors that most differentiate groups. This paper presents an interactive DR framework where we integrate our new DR method, called ULCA (unified linear comparative analysis), with an interactive visual interface. ULCA unifies two DR schemes, discriminant analysis and contrastive learning, to support various comparative analysis tasks. To provide flexibility for comparative analysis, we develop an optimization algorithm that enables analysts to interactively refine ULCA results. Additionally, the interactive visualization interface facilitates interpretation and refinement of the ULCA results. We evaluate ULCA and the optimization algorithm to show their efficiency as well as present multiple case studies using real-world datasets to demonstrate the usefulness of this framework.", "abstracts": [ { "abstractType": "Regular", "content": "Finding the similarities and differences between groups of datasets is a fundamental analysis task. For high-dimensional data, dimensionality reduction (DR) methods are often used to find the characteristics of each group. 
However, existing DR methods provide limited capability and flexibility for such comparative analysis as each method is designed only for a narrow analysis target, such as identifying factors that most differentiate groups. This paper presents an interactive DR framework where we integrate our new DR method, called ULCA (unified linear comparative analysis), with an interactive visual interface. ULCA unifies two DR schemes, discriminant analysis and contrastive learning, to support various comparative analysis tasks. To provide flexibility for comparative analysis, we develop an optimization algorithm that enables analysts to interactively refine ULCA results. Additionally, the interactive visualization interface facilitates interpretation and refinement of the ULCA results. We evaluate ULCA and the optimization algorithm to show their efficiency as well as present multiple case studies using real-world datasets to demonstrate the usefulness of this framework.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Finding the similarities and differences between groups of datasets is a fundamental analysis task. For high-dimensional data, dimensionality reduction (DR) methods are often used to find the characteristics of each group. However, existing DR methods provide limited capability and flexibility for such comparative analysis as each method is designed only for a narrow analysis target, such as identifying factors that most differentiate groups. This paper presents an interactive DR framework where we integrate our new DR method, called ULCA (unified linear comparative analysis), with an interactive visual interface. ULCA unifies two DR schemes, discriminant analysis and contrastive learning, to support various comparative analysis tasks. To provide flexibility for comparative analysis, we develop an optimization algorithm that enables analysts to interactively refine ULCA results. 
Additionally, the interactive visualization interface facilitates interpretation and refinement of the ULCA results. We evaluate ULCA and the optimization algorithm to show their efficiency as well as present multiple case studies using real-world datasets to demonstrate the usefulness of this framework.", "title": "Interactive Dimensionality Reduction for Comparative Analysis", "normalizedTitle": "Interactive Dimensionality Reduction for Comparative Analysis", "fno": "09555244", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Principal Component Analysis", "Visualization", "Optimization", "Task Analysis", "Dimensionality Reduction", "Tools", "Libraries", "Dimensionality Reduction", "Discriminant Analysis", "Contrastive Learning", "Comparative Analysis", "Interpretability", "Visual Analytics" ], "authors": [ { "givenName": "Takanori", "surname": "Fujiwara", "fullName": "Takanori Fujiwara", "affiliation": "University of California, Davis, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Xinhai", "surname": "Wei", "fullName": "Xinhai Wei", "affiliation": "University of Waterloo, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Jian", "surname": "Zhao", "fullName": "Jian Zhao", "affiliation": "University of Waterloo, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Kwan-Liu", "surname": "Ma", "fullName": "Kwan-Liu Ma", "affiliation": "University of California, Davis, United States", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "758-768", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2018/3788/0/08545659", "title": "Generalized Fisher Discriminant Analysis as A Dimensionality Reduction Technique", "doi": null, "abstractUrl": 
"/proceedings-article/icpr/2018/08545659/17D45WK5Apy", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08546198", "title": "Maximum Gradient Dimensionality Reduction", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08546198/17D45XzbnLm", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09904480", "title": "Interactive Visual Cluster Analysis by Contrastive Dimensionality Reduction", "doi": null, "abstractUrl": "/journal/tg/2023/01/09904480/1H0GkV5P1qo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08805461", "title": "Supporting Analysis of Dimensionality Reduction Results with Contrastive Learning", "doi": null, "abstractUrl": "/journal/tg/2020/01/08805461/1cG4ulCK5S8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08809834", "title": "An Incremental Dimensionality Reduction Method for Visualizing Streaming Multidimensional Data", "doi": null, "abstractUrl": "/journal/tg/2020/01/08809834/1cHEiLzaKw8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a228", "title": "User-guided Dimensionality Reduction Ensembles", "doi": null, "abstractUrl": 
"/proceedings-article/iv/2019/283800a228/1cMF9VUpFgA", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09216630", "title": "A Visual Analytics Framework for Reviewing Multivariate Time-Series Data with Dimensionality Reduction", "doi": null, "abstractUrl": "/journal/tg/2021/02/09216630/1nJsMUFa6f6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2020/8014/0/801400a111", "title": "DRUID<inf>JS</inf> &#x2014; A JavaScript Library for Dimensionality Reduction", "doi": null, "abstractUrl": "/proceedings-article/vis/2020/801400a111/1qRNP6eEG52", "parentPublication": { "id": "proceedings/vis/2020/8014/0", "title": "2020 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552226", "title": "Revisiting Dimensionality Reduction Techniques for Visual Cluster Analysis: An Empirical Study", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552226/1xicaXrIayI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2021/3335/0/333500a026", "title": "Semantic Explanation of Interactive Dimensionality Reduction", "doi": null, "abstractUrl": "/proceedings-article/vis/2021/333500a026/1yXuftZECbe", "parentPublication": { "id": "proceedings/vis/2021/3335/0", "title": "2021 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09555620", "articleId": "1xlvYtlxuKY", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "09552237", "articleId": "1xic2ZoShgI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zJiHpIFACA", "name": "ttg202201-09555244s1-supp1-3114807.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09555244s1-supp1-3114807.mp4", "extension": "mp4", "size": "65.3 MB", "__typename": "WebExtraType" }, { "id": "1zJiH4K3yCY", "name": "ttg202201-09555244s1-supp2-3114807.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09555244s1-supp2-3114807.pdf", "extension": "pdf", "size": "1.92 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xic2ZoShgI", "doi": "10.1109/TVCG.2021.3114784", "abstract": "We present an approach utilizing Topological Data Analysis to study the structure of face poses used in affective computing, i.e., the process of recognizing human emotion. The approach uses a conditional comparison of different emotions, both respective and irrespective of time, with multiple topological distance metrics, dimension reduction techniques, and face subsections (e.g., eyes, nose, mouth, etc.). The results confirm that our topology-based approach captures known patterns, distinctions between emotions, and distinctions between individuals, which is an important step towards more robust and explainable emotion recognition by machines.", "abstracts": [ { "abstractType": "Regular", "content": "We present an approach utilizing Topological Data Analysis to study the structure of face poses used in affective computing, i.e., the process of recognizing human emotion. The approach uses a conditional comparison of different emotions, both respective and irrespective of time, with multiple topological distance metrics, dimension reduction techniques, and face subsections (e.g., eyes, nose, mouth, etc.). The results confirm that our topology-based approach captures known patterns, distinctions between emotions, and distinctions between individuals, which is an important step towards more robust and explainable emotion recognition by machines.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present an approach utilizing Topological Data Analysis to study the structure of face poses used in affective computing, i.e., the process of recognizing human emotion. 
The approach uses a conditional comparison of different emotions, both respective and irrespective of time, with multiple topological distance metrics, dimension reduction techniques, and face subsections (e.g., eyes, nose, mouth, etc.). The results confirm that our topology-based approach captures known patterns, distinctions between emotions, and distinctions between individuals, which is an important step towards more robust and explainable emotion recognition by machines.", "title": "AffectiveTDA: Using Topological Data Analysis to Improve Analysis and Explainability in Affective Computing", "normalizedTitle": "AffectiveTDA: Using Topological Data Analysis to Improve Analysis and Explainability in Affective Computing", "fno": "09552237", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Face Recognition", "Affective Computing", "Three Dimensional Displays", "Topology", "Feature Extraction", "Data Visualization", "Data Analysis", "Affective Computing", "Topological Data Analysis", "Explainability", "Visualization" ], "authors": [ { "givenName": "Hamza", "surname": "Elhamdadi", "fullName": "Hamza Elhamdadi", "affiliation": "University of Massachusetts, Amherst, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Shaun", "surname": "Canavan", "fullName": "Shaun Canavan", "affiliation": "University of South Florida, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Paul", "surname": "Rosen", "fullName": "Paul Rosen", "affiliation": "University of South Florida, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "769-779", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdm/2009/3895/0/3895a699", "title": "Joint Emotion-Topic Modeling for Social Affective Text Mining", 
"doi": null, "abstractUrl": "/proceedings-article/icdm/2009/3895a699/12OmNqBbHEo", "parentPublication": { "id": "proceedings/icdm/2009/3895/0", "title": "2009 Ninth IEEE International Conference on Data Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/1/3336a459", "title": "The Research on User Modeling for Personalized Affective Computing", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336a459/12OmNxFJXVm", "parentPublication": { "id": "proceedings/csse/2008/3336/1", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2009/3890/0/3890a527", "title": "Towards a Ground Truth for Affective Classification in Movies", "doi": null, "abstractUrl": "/proceedings-article/ism/2009/3890a527/12OmNyY4rpb", "parentPublication": { "id": "proceedings/ism/2009/3890/0", "title": "2009 11th IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cimca/2008/3514/0/3514a849", "title": "Semantic Classifier for Affective Computing", "doi": null, "abstractUrl": "/proceedings-article/cimca/2008/3514a849/12OmNyo1nMK", "parentPublication": { "id": "proceedings/cimca/2008/3514/0", "title": "2008 International Conference on Computational Intelligence for Modelling Control &amp; Automation (CIMCA 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2012/09/ttk2012091658", "title": "Mining Social Emotions from Affective Text", "doi": null, "abstractUrl": "/journal/tk/2012/09/ttk2012091658/13rRUwvBy9f", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ex/2016/02/mex2016020102", "title": 
"Affective Computing and Sentiment Analysis", "doi": null, "abstractUrl": "/magazine/ex/2016/02/mex2016020102/13rRUzpQPQ4", "parentPublication": { "id": "mags/ex", "title": "IEEE Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2022/5908/0/09953864", "title": "Interpretable Explainability in Facial Emotion Recognition and Gamification for Data Collection", "doi": null, "abstractUrl": "/proceedings-article/acii/2022/09953864/1IAK5ikKDPW", "parentPublication": { "id": "proceedings/acii/2022/5908/0", "title": "2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aciiw/2022/5490/0/10085993", "title": "Context-Dependent Deep Learning for Affective Computing", "doi": null, "abstractUrl": "/proceedings-article/aciiw/2022/10085993/1M669OPOQzm", "parentPublication": { "id": "proceedings/aciiw/2022/5490/0", "title": "2022 10th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09472932", "title": "Affective Image Content Analysis: Two Decades Review and New Perspectives", "doi": null, "abstractUrl": "/journal/tp/2022/10/09472932/1uUtvpP3SsE", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccnea/2021/4486/0/448600a046", "title": "Research on Affective Computing Based on Self-Awareness", "doi": null, "abstractUrl": "/proceedings-article/iccnea/2021/448600a046/1yEZq7jjIYg", "parentPublication": { "id": "proceedings/iccnea/2021/4486/0", "title": "2021 International Conference on Computer Network, Electronic and Automation (ICCNEA)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09555244", "articleId": "1xjR1QZtkTS", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552218", "articleId": "1xic3hb4Hle", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zJiFOQRWHm", "name": "ttg202201-09552237s1-supp1-3114784.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552237s1-supp1-3114784.mp4", "extension": "mp4", "size": "50.1 MB", "__typename": "WebExtraType" }, { "id": "1zJiFz29Qis", "name": "ttg202201-09552237s1-supp2-3114784.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552237s1-supp2-3114784.pdf", "extension": "pdf", "size": "4.79 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xic3hb4Hle", "doi": "10.1109/TVCG.2021.3114837", "abstract": "The interpretation of deep neural networks (DNNs) has become a key topic as more and more people apply them to solve various problems and making critical decisions. Concept-based explanations have recently become a popular approach for post-hoc interpretation of DNNs. However, identifying human-understandable visual concepts that affect model decisions is a challenging task that is not easily addressed with automatic approaches. We present a novel human-in-the-loop approach to generate user-defined concepts for model interpretation and diagnostics. Central to our proposal is the use of active learning, where human knowledge and feedback are combined to train a concept extractor with very little human labeling effort. We integrate this process into an interactive system, ConceptExtract. Through two case studies, we show how our approach helps analyze model behavior and extract human-friendly concepts for different machine learning tasks and datasets and how to use these concepts to understand the predictions, compare model performance and make suggestions for model refinement. Quantitative experiments show that our active learning approach can accurately extract meaningful visual concepts. More importantly, by identifying visual concepts that negatively affect model performance, we develop the corresponding data augmentation strategy that consistently improves model performance.", "abstracts": [ { "abstractType": "Regular", "content": "The interpretation of deep neural networks (DNNs) has become a key topic as more and more people apply them to solve various problems and making critical decisions. 
Concept-based explanations have recently become a popular approach for post-hoc interpretation of DNNs. However, identifying human-understandable visual concepts that affect model decisions is a challenging task that is not easily addressed with automatic approaches. We present a novel human-in-the-loop approach to generate user-defined concepts for model interpretation and diagnostics. Central to our proposal is the use of active learning, where human knowledge and feedback are combined to train a concept extractor with very little human labeling effort. We integrate this process into an interactive system, ConceptExtract. Through two case studies, we show how our approach helps analyze model behavior and extract human-friendly concepts for different machine learning tasks and datasets and how to use these concepts to understand the predictions, compare model performance and make suggestions for model refinement. Quantitative experiments show that our active learning approach can accurately extract meaningful visual concepts. More importantly, by identifying visual concepts that negatively affect model performance, we develop the corresponding data augmentation strategy that consistently improves model performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The interpretation of deep neural networks (DNNs) has become a key topic as more and more people apply them to solve various problems and making critical decisions. Concept-based explanations have recently become a popular approach for post-hoc interpretation of DNNs. However, identifying human-understandable visual concepts that affect model decisions is a challenging task that is not easily addressed with automatic approaches. We present a novel human-in-the-loop approach to generate user-defined concepts for model interpretation and diagnostics. 
Central to our proposal is the use of active learning, where human knowledge and feedback are combined to train a concept extractor with very little human labeling effort. We integrate this process into an interactive system, ConceptExtract. Through two case studies, we show how our approach helps analyze model behavior and extract human-friendly concepts for different machine learning tasks and datasets and how to use these concepts to understand the predictions, compare model performance and make suggestions for model refinement. Quantitative experiments show that our active learning approach can accurately extract meaningful visual concepts. More importantly, by identifying visual concepts that negatively affect model performance, we develop the corresponding data augmentation strategy that consistently improves model performance.", "title": "Human-in-the-loop Extraction of Interpretable Concepts in Deep Learning Models", "normalizedTitle": "Human-in-the-loop Extraction of Interpretable Concepts in Deep Learning Models", "fno": "09552218", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Data Models", "Analytical Models", "Predictive Models", "Computational Modeling", "Deep Learning", "Task Analysis", "Visual Data Exploration", "Deep Neural Network", "Model Interpretation", "Explainable AI" ], "authors": [ { "givenName": "Zhenge", "surname": "Zhao", "fullName": "Zhenge Zhao", "affiliation": "University of Arizona, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Panpan", "surname": "Xu", "fullName": "Panpan Xu", "affiliation": "Amazon AWS AI, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Carlos", "surname": "Scheidegger", "fullName": "Carlos Scheidegger", "affiliation": "University of Arizona, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Liu", "surname": "Ren", "fullName": "Liu Ren", "affiliation": "Bosch Research North America, United States", "__typename": "ArticleAuthorType" } 
], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "780-790", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2019/06/08667661", "title": "<italic>DeepVID</italic>: Deep Visual Interpretation and Diagnosis for Image Classifiers via Knowledge Distillation", "doi": null, "abstractUrl": "/journal/tg/2019/06/08667661/18q6nouFfmo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2022/01/09713986", "title": "HMCKRAutoEncoder: An Interpretable Deep Learning Framework for Time Series Analysis", "doi": null, "abstractUrl": "/journal/ec/2022/01/09713986/1AZLECUeBzy", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on Emerging Topics in Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2022/9221/0/922100b519", "title": "NeuronFair: Interpretable White-Box Fairness Testing through Biased Neuron Identification", "doi": null, "abstractUrl": "/proceedings-article/icse/2022/922100b519/1Ems0oUChe8", "parentPublication": { "id": "proceedings/icse/2022/9221/0", "title": "2022 IEEE/ACM 44th International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/09896988", "title": "Human-in-the-Loop Rule Discovery for Micropost Event Detection", "doi": null, "abstractUrl": "/journal/tk/5555/01/09896988/1GQIASvIcsU", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tk/5555/01/09904301", "title": "Concept-Level Model Interpretation From the Causal Aspect", "doi": null, "abstractUrl": "/journal/tk/5555/01/09904301/1H0G26Jum4g", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/09964439", "title": "Interpretable by Design: Learning Predictors by Composing Interpretable Queries", "doi": null, "abstractUrl": "/journal/tp/5555/01/09964439/1IFEEJ0hSCs", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icict/2020/7283/0/728300a078", "title": "New Perspective of Interpretability of Deep Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icict/2020/728300a078/1jPb572WT84", "parentPublication": { "id": "proceedings/icict/2020/7283/0", "title": "2020 3rd International Conference on Information and Computer Technologies (ICICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/06/09310358", "title": "Self-Correction for Human Parsing", "doi": null, "abstractUrl": "/journal/tp/2022/06/09310358/1pXhJmyvZAI", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2020/6215/0/09313119", "title": "IB-M: A Flexible Framework to Align an Interpretable Model and a Black-box Model", "doi": null, "abstractUrl": "/proceedings-article/bibm/2020/09313119/1qmgdEr4z2E", "parentPublication": { "id": "proceedings/bibm/2020/6215/0", "title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/icnp/2021/4131/0/09651937", "title": "Generalizable and Interpretable Deep Learning for Network Congestion Prediction", "doi": null, "abstractUrl": "/proceedings-article/icnp/2021/09651937/1zHIHg28C7m", "parentPublication": { "id": "proceedings/icnp/2021/4131/0", "title": "2021 IEEE 29th International Conference on Network Protocols (ICNP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552237", "articleId": "1xic2ZoShgI", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552842", "articleId": "1xic0JWIDfy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaKnQQZMI", "name": "ttg202201-09552218s1-supp2-3114837.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552218s1-supp2-3114837.mp4", "extension": "mp4", "size": "123 MB", "__typename": "WebExtraType" }, { "id": "1zBaL34WQ5G", "name": "ttg202201-09552218s1-supp1-3114837.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552218s1-supp1-3114837.pdf", "extension": "pdf", "size": "296 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xic0JWIDfy", "doi": "10.1109/TVCG.2021.3114793", "abstract": "Zero-shot classification is a promising paradigm to solve an applicable problem when the training classes and test classes are disjoint. Achieving this usually needs experts to externalize their domain knowledge by manually specifying a class-attribute matrix to define which classes have which attributes. Designing a suitable class-attribute matrix is the key to the subsequent procedure, but this design process is tedious and trial-and-error with no guidance. This paper proposes a visual explainable active learning approach with its design and implementation called semantic navigator to solve the above problems. This approach promotes human-AI teaming with four actions (ask, explain, recommend, respond) in each interaction loop. The machine asks contrastive questions to guide humans in the thinking process of attributes. A novel visualization called semantic map explains the current status of the machine. Therefore analysts can better understand why the machine misclassifies objects. Moreover, the machine recommends the labels of classes for each attribute to ease the labeling burden. Finally, humans can steer the model by modifying the labels interactively, and the machine adjusts its recommendations. The visual explainable active learning approach improves humans&#x0027; efficiency of building zero-shot classification models interactively, compared with the method without guidance. 
We justify our results with user studies using the standard benchmarks for zero-shot classification.", "abstracts": [ { "abstractType": "Regular", "content": "Zero-shot classification is a promising paradigm to solve an applicable problem when the training classes and test classes are disjoint. Achieving this usually needs experts to externalize their domain knowledge by manually specifying a class-attribute matrix to define which classes have which attributes. Designing a suitable class-attribute matrix is the key to the subsequent procedure, but this design process is tedious and trial-and-error with no guidance. This paper proposes a visual explainable active learning approach with its design and implementation called semantic navigator to solve the above problems. This approach promotes human-AI teaming with four actions (ask, explain, recommend, respond) in each interaction loop. The machine asks contrastive questions to guide humans in the thinking process of attributes. A novel visualization called semantic map explains the current status of the machine. Therefore analysts can better understand why the machine misclassifies objects. Moreover, the machine recommends the labels of classes for each attribute to ease the labeling burden. Finally, humans can steer the model by modifying the labels interactively, and the machine adjusts its recommendations. The visual explainable active learning approach improves humans&#x0027; efficiency of building zero-shot classification models interactively, compared with the method without guidance. We justify our results with user studies using the standard benchmarks for zero-shot classification.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Zero-shot classification is a promising paradigm to solve an applicable problem when the training classes and test classes are disjoint. 
Achieving this usually needs experts to externalize their domain knowledge by manually specifying a class-attribute matrix to define which classes have which attributes. Designing a suitable class-attribute matrix is the key to the subsequent procedure, but this design process is tedious and trial-and-error with no guidance. This paper proposes a visual explainable active learning approach with its design and implementation called semantic navigator to solve the above problems. This approach promotes human-AI teaming with four actions (ask, explain, recommend, respond) in each interaction loop. The machine asks contrastive questions to guide humans in the thinking process of attributes. A novel visualization called semantic map explains the current status of the machine. Therefore analysts can better understand why the machine misclassifies objects. Moreover, the machine recommends the labels of classes for each attribute to ease the labeling burden. Finally, humans can steer the model by modifying the labels interactively, and the machine adjusts its recommendations. The visual explainable active learning approach improves humans' efficiency of building zero-shot classification models interactively, compared with the method without guidance. 
We justify our results with user studies using the standard benchmarks for zero-shot classification.", "title": "Towards Visual Explainable Active Learning for Zero-Shot Classification", "normalizedTitle": "Towards Visual Explainable Active Learning for Zero-Shot Classification", "fno": "09552842", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Image Classification", "Image Representation", "Learning Artificial Intelligence", "Pattern Classification", "Visual Databases", "Visual Explainable Active Learning Approach", "Human AI Teaming", "Zero Shot Classification Models", "Applicable Problem", "Training Classes", "Test Classes", "Suitable Class Attribute Matrix", "Design Process", "Semantics", "Labeling", "Training", "Visual Analytics", "Testing", "Task Analysis", "Navigation", "Active Learning", "Explainable Artificial Intelligence", "Human AI Teaming", "Mixed Initiative Visual Analytics" ], "authors": [ { "givenName": "Shichao", "surname": "Jia", "fullName": "Shichao Jia", "affiliation": "College of Intelligence and Computing, Tianjin University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Zeyu", "surname": "Li", "fullName": "Zeyu Li", "affiliation": "College of Intelligence and Computing, Tianjin University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Nuo", "surname": "Chen", "fullName": "Nuo Chen", "affiliation": "College of Intelligence and Computing, Tianjin University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jiawan", "surname": "Zhang", "fullName": "Jiawan Zhang", "affiliation": "College of Intelligence and Computing, Tianjin University, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "791-801", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" 
}, "recommendedArticles": [ { "id": "proceedings/cvpr/2016/8851/0/8851a059", "title": "Multi-cue Zero-Shot Learning with Strong Supervision", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851a059/12OmNBcAGKE", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118c441", "title": "COSTA: Co-Occurrence Statistics for Zero-Shot Classification", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118c441/12OmNqGA50v", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/08237715", "title": "Learning Discriminative Latent Attributes for Zero-Shot Classification", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/08237715/12OmNy6qfNL", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/03/ttp2014030453", "title": "Attribute-Based Classification for Zero-Shot Visual Object Categorization", "doi": null, "abstractUrl": "/journal/tp/2014/03/ttp2014030453/13rRUILtJsd", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000h670", "title": "Zero-Shot Kernel Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000h670/17D45WwsQ8j", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and 
Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545869", "title": "Hard Zero Shot Learning for Gesture Recognition", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545869/17D45Xh13t4", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/01/09681230", "title": "Towards Zero-Shot Sign Language Recognition", "doi": null, "abstractUrl": "/journal/tp/2023/01/09681230/1A8c6K0vAnm", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900d930", "title": "Zero-shot Learning Using Multimodal Descriptions", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900d930/1G56OtiHKUg", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300f783", "title": "Creativity Inspired Zero-Shot Learning", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300f783/1hVlAFirKeY", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2020/8014/0/801400a251", "title": "Visually Analyzing and Steering Zero Shot Learning", "doi": null, "abstractUrl": "/proceedings-article/vis/2020/801400a251/1qRNQGu8try", "parentPublication": { "id": "proceedings/vis/2020/8014/0", "title": "2020 IEEE 
Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552218", "articleId": "1xic3hb4Hle", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552921", "articleId": "1xic8w3ygrm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xic8w3ygrm", "doi": "10.1109/TVCG.2021.3114794", "abstract": "Multimodal sentiment analysis aims to recognize people's attitudes from multiple communication channels such as verbal content (i.e., text), voice, and facial expressions. It has become a vibrant and important research topic in natural language processing. Much research focuses on modeling the complex intra- and inter-modal interactions between different communication channels. However, current multimodal models with strong performance are often deep-learning-based techniques and work like black boxes. It is not clear how models utilize multimodal information for sentiment predictions. Despite recent advances in techniques for enhancing the explainability of machine learning models, they often target unimodal scenarios (e.g., images, sentences), and little research has been done on explaining multimodal models. In this paper, we present an interactive visual analytics system, M2 Lens, to visualize and explain multimodal models for sentiment analysis. M2 Lens provides explanations on intra- and inter-modal interactions at the global, subset, and local levels. Specifically, it summarizes the influence of three typical interaction types (i.e., dominance, complement, and conflict) on the model predictions. Moreover, M2 Lens identifies frequent and influential multimodal features and supports the multi-faceted exploration of model behaviors from language, acoustic, and visual modalities. 
Through two case studies and expert interviews, we demonstrate our system can help users gain deep insights into the multimodal models for sentiment analysis.", "abstracts": [ { "abstractType": "Regular", "content": "Multimodal sentiment analysis aims to recognize people's attitudes from multiple communication channels such as verbal content (i.e., text), voice, and facial expressions. It has become a vibrant and important research topic in natural language processing. Much research focuses on modeling the complex intra- and inter-modal interactions between different communication channels. However, current multimodal models with strong performance are often deep-learning-based techniques and work like black boxes. It is not clear how models utilize multimodal information for sentiment predictions. Despite recent advances in techniques for enhancing the explainability of machine learning models, they often target unimodal scenarios (e.g., images, sentences), and little research has been done on explaining multimodal models. In this paper, we present an interactive visual analytics system, M2 Lens, to visualize and explain multimodal models for sentiment analysis. M2 Lens provides explanations on intra- and inter-modal interactions at the global, subset, and local levels. Specifically, it summarizes the influence of three typical interaction types (i.e., dominance, complement, and conflict) on the model predictions. Moreover, M2 Lens identifies frequent and influential multimodal features and supports the multi-faceted exploration of model behaviors from language, acoustic, and visual modalities. 
Through two case studies and expert interviews, we demonstrate our system can help users gain deep insights into the multimodal models for sentiment analysis.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Multimodal sentiment analysis aims to recognize people's attitudes from multiple communication channels such as verbal content (i.e., text), voice, and facial expressions. It has become a vibrant and important research topic in natural language processing. Much research focuses on modeling the complex intra- and inter-modal interactions between different communication channels. However, current multimodal models with strong performance are often deep-learning-based techniques and work like black boxes. It is not clear how models utilize multimodal information for sentiment predictions. Despite recent advances in techniques for enhancing the explainability of machine learning models, they often target unimodal scenarios (e.g., images, sentences), and little research has been done on explaining multimodal models. In this paper, we present an interactive visual analytics system, M2 Lens, to visualize and explain multimodal models for sentiment analysis. M2 Lens provides explanations on intra- and inter-modal interactions at the global, subset, and local levels. Specifically, it summarizes the influence of three typical interaction types (i.e., dominance, complement, and conflict) on the model predictions. Moreover, M2 Lens identifies frequent and influential multimodal features and supports the multi-faceted exploration of model behaviors from language, acoustic, and visual modalities. 
Through two case studies and expert interviews, we demonstrate our system can help users gain deep insights into the multimodal models for sentiment analysis.", "title": "M2Lens: Visualizing and Explaining Multimodal Models for Sentiment Analysis", "normalizedTitle": "M2Lens: Visualizing and Explaining Multimodal Models for Sentiment Analysis", "fno": "09552921", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Mining", "Data Visualisation", "Emotion Recognition", "Learning Artificial Intelligence", "Natural Language Processing", "Text Analysis", "Video Signal Processing", "Explainability", "Machine Learning Models", "Explaining Multimodal Models", "Interactive Visual Analytics System", "M 2 Lens", "Inter Modal Interactions", "Model Predictions", "Multimodal Features", "Model Behaviors", "Visual Modalities", "M 2 Lens", "Multimodal Sentiment Analysis", "Multiple Communication Channels", "Vibrant Research Topic", "Natural Language Processing", "Different Communication Channels", "Current Multimodal Models", "Multimodal Information", "Sentiment Predictions", "Analytical Models", "Sentiment Analysis", "Computational Modeling", "Predictive Models", "Data Models", "Lenses", "Communication Channels", "Multimodal Models", "Sentiment Analysis", "Explainable Machine Learning" ], "authors": [ { "givenName": "Xingbo", "surname": "Wang", "fullName": "Xingbo Wang", "affiliation": "University of Science and Technology, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Jianben", "surname": "He", "fullName": "Jianben He", "affiliation": "University of Science and Technology, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Zhihua", "surname": "Jin", "fullName": "Zhihua Jin", "affiliation": "University of Science and Technology, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Muqiao", "surname": "Yang", "fullName": "Muqiao Yang", "affiliation": "Carnegie Mellon University, United States", 
"__typename": "ArticleAuthorType" }, { "givenName": "Yong", "surname": "Wang", "fullName": "Yong Wang", "affiliation": "Carnegie Mellon University, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Huamin", "surname": "Qu", "fullName": "Huamin Qu", "affiliation": "University of Science and Technology, United States", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "802-812", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icme/2017/6067/0/08019301", "title": "Select-additive learning: Improving generalization in multimodal sentiment analysis", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019301/12OmNyuPKYX", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ex/2018/06/08636432", "title": "Multimodal Sentiment Analysis: Addressing Key Issues and Setting Up the Baselines", "doi": null, "abstractUrl": "/magazine/ex/2018/06/08636432/17D45XeKgoe", "parentPublication": { "id": "mags/ex", "title": "IEEE Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/09726810", "title": "Effects of Physiological Signals in Different Types of Multimodal Sentiment Estimation", "doi": null, "abstractUrl": "/journal/ta/5555/01/09726810/1BrwhpnaEpi", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2022/7218/0/09859289", "title": "Demusa: Demo for Multimodal Sentiment 
Analysis", "doi": null, "abstractUrl": "/proceedings-article/icmew/2022/09859289/1G4EX2l1hVC", "parentPublication": { "id": "proceedings/icmew/2022/7218/0", "title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09860014", "title": "Utilizing BERT Intermediate Layers for Multimodal Sentiment Analysis", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09860014/1G9EKqcyxmU", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859747", "title": "HMAI-BERT: Hierarchical Multimodal Alignment and Interaction Network-Enhanced BERT for Multimodal Sentiment Analysis", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859747/1G9EbGAd56w", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956114", "title": "IMCN: Identifying Modal Contribution Network for Multimodal Sentiment Analysis", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956114/1IHoIacv1uM", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/5555/01/10014688", "title": "Affective Region Recognition and Fusion Network for Target-Level Multimodal Sentiment Classification", "doi": null, "abstractUrl": "/journal/ec/5555/01/10014688/1JP1By8yIP6", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on Emerging Topics in Computing", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icftic/2022/2195/0/10075170", "title": "A multimodal sentiment analysis model based on semantic enrichment", "doi": null, "abstractUrl": "/proceedings-article/icftic/2022/10075170/1LRlklPXkac", "parentPublication": { "id": "proceedings/icftic/2022/2195/0", "title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bcd/2019/0886/0/08885108", "title": "Multimodal Sentiment Analysis via RNN variants", "doi": null, "abstractUrl": "/proceedings-article/bcd/2019/08885108/1ezS0p24e3K", "parentPublication": { "id": "proceedings/bcd/2019/0886/0", "title": "2019 IEEE International Conference on Big Data, Cloud Computing, Data Science & Engineering (BCD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552842", "articleId": "1xic0JWIDfy", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552879", "articleId": "1xibY2EaE80", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaZWGyvAY", "name": "ttg202201-09552921s1-supp1-3114794.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552921s1-supp1-3114794.mp4", "extension": "mp4", "size": "30.7 MB", "__typename": "WebExtraType" }, { "id": "1zBaZLHjdq8", "name": "ttg202201-09552921s1-supp2-3114794.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552921s1-supp2-3114794.pdf", "extension": "pdf", "size": "784 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xibY2EaE80", "doi": "10.1109/TVCG.2021.3114858", "abstract": "Existing research on making sense of deep neural networks often focuses on neuron-level interpretation, which may not adequately capture the bigger picture of how concepts are collectively encoded by multiple neurons. We present Neurocartography, an interactive system that scalably summarizes and visualizes concepts learned by neural networks. It automatically discovers and groups neurons that detect the same concepts, and describes how such neuron groups interact to form higher-level concepts and the subsequent predictions. Neurocartography introduces two scalable summarization techniques: (1) neuron clustering groups neurons based on the semantic similarity of the concepts detected by neurons (e.g., neurons detecting “dog faces” of different breeds are grouped); and (2) neuron embedding encodes the associations between related concepts based on how often they co-occur (e.g., neurons detecting “dog face” and “dog tail” are placed closer in the embedding space). Key to our scalable techniques is the ability to efficiently compute all neuron pairs' relationships, in time linear to the number of neurons instead of quadratic time. Neurocartography scales to large data, such as the ImageNet dataset with 1.2M images. The system's tightly coordinated views integrate the scalable techniques to visualize the concepts and their relationships, projecting the concept associations to a 2D space in Neuron Projection View, and summarizing neuron clusters and their relationships in Graph View. 
Through a large-scale human evaluation, we demonstrate that our technique discovers neuron groups that represent coherent, human-meaningful concepts. And through usage scenarios, we describe how our approaches enable interesting and surprising discoveries, such as concept cascades of related and isolated concepts. The Neurocartography visualization runs in modern browsers and is open-sourced.", "abstracts": [ { "abstractType": "Regular", "content": "Existing research on making sense of deep neural networks often focuses on neuron-level interpretation, which may not adequately capture the bigger picture of how concepts are collectively encoded by multiple neurons. We present Neurocartography, an interactive system that scalably summarizes and visualizes concepts learned by neural networks. It automatically discovers and groups neurons that detect the same concepts, and describes how such neuron groups interact to form higher-level concepts and the subsequent predictions. Neurocartography introduces two scalable summarization techniques: (1) neuron clustering groups neurons based on the semantic similarity of the concepts detected by neurons (e.g., neurons detecting “dog faces” of different breeds are grouped); and (2) neuron embedding encodes the associations between related concepts based on how often they co-occur (e.g., neurons detecting “dog face” and “dog tail” are placed closer in the embedding space). Key to our scalable techniques is the ability to efficiently compute all neuron pairs' relationships, in time linear to the number of neurons instead of quadratic time. Neurocartography scales to large data, such as the ImageNet dataset with 1.2M images. The system's tightly coordinated views integrate the scalable techniques to visualize the concepts and their relationships, projecting the concept associations to a 2D space in Neuron Projection View, and summarizing neuron clusters and their relationships in Graph View. 
Through a large-scale human evaluation, we demonstrate that our technique discovers neuron groups that represent coherent, human-meaningful concepts. And through usage scenarios, we describe how our approaches enable interesting and surprising discoveries, such as concept cascades of related and isolated concepts. The Neurocartography visualization runs in modern browsers and is open-sourced.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Existing research on making sense of deep neural networks often focuses on neuron-level interpretation, which may not adequately capture the bigger picture of how concepts are collectively encoded by multiple neurons. We present Neurocartography, an interactive system that scalably summarizes and visualizes concepts learned by neural networks. It automatically discovers and groups neurons that detect the same concepts, and describes how such neuron groups interact to form higher-level concepts and the subsequent predictions. Neurocartography introduces two scalable summarization techniques: (1) neuron clustering groups neurons based on the semantic similarity of the concepts detected by neurons (e.g., neurons detecting “dog faces” of different breeds are grouped); and (2) neuron embedding encodes the associations between related concepts based on how often they co-occur (e.g., neurons detecting “dog face” and “dog tail” are placed closer in the embedding space). Key to our scalable techniques is the ability to efficiently compute all neuron pairs' relationships, in time linear to the number of neurons instead of quadratic time. Neurocartography scales to large data, such as the ImageNet dataset with 1.2M images. The system's tightly coordinated views integrate the scalable techniques to visualize the concepts and their relationships, projecting the concept associations to a 2D space in Neuron Projection View, and summarizing neuron clusters and their relationships in Graph View. 
Through a large-scale human evaluation, we demonstrate that our technique discovers neuron groups that represent coherent, human-meaningful concepts. And through usage scenarios, we describe how our approaches enable interesting and surprising discoveries, such as concept cascades of related and isolated concepts. The Neurocartography visualization runs in modern browsers and is open-sourced.", "title": "NeuroCartography: Scalable Automatic Visual Summarization of Concepts in Deep Neural Networks", "normalizedTitle": "NeuroCartography: Scalable Automatic Visual Summarization of Concepts in Deep Neural Networks", "fno": "09552879", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Neurons", "Dogs", "Feature Extraction", "Visualization", "Faces", "Deep Learning", "Semantics", "Deep Learning Interpretability", "Visual Analytics", "Scalable Summarization", "Neuron Clustering", "Neuron Embedding" ], "authors": [ { "givenName": "Haekyu", "surname": "Park", "fullName": "Haekyu Park", "affiliation": "Georgia Institute of Technology, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Nilaksh", "surname": "Das", "fullName": "Nilaksh Das", "affiliation": "Georgia Institute of Technology, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Rahul", "surname": "Duggal", "fullName": "Rahul Duggal", "affiliation": "Georgia Institute of Technology, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Austin P.", "surname": "Wright", "fullName": "Austin P. 
Wright", "affiliation": "Georgia Institute of Technology, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Omar", "surname": "Shaikh", "fullName": "Omar Shaikh", "affiliation": "Georgia Institute of Technology, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Fred", "surname": "Hohman", "fullName": "Fred Hohman", "affiliation": "Apple, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Duen Horng", "surname": "Polo Chau", "fullName": "Duen Horng Polo Chau", "affiliation": "Georgia Institute of Technology, United States", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "813-823", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icmew/2016/1552/0/07574665", "title": "Improve dog recognition by mining more information from both click-through logs and pre-trained models", "doi": null, "abstractUrl": "/proceedings-article/icmew/2016/07574665/12OmNvonIKr", "parentPublication": { "id": "proceedings/icmew/2016/1552/0", "title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2018/4408/0/440801a172", "title": "Semantic Image Retrieval via Active Grounding of Visual Situations", "doi": null, "abstractUrl": "/proceedings-article/icsc/2018/440801a172/12OmNyUWR7p", "parentPublication": { "id": "proceedings/icsc/2018/4408/0", "title": "2018 IEEE 12th International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034a126", "title": "Automatic 3D Single Neuron Reconstruction with Exhaustive Tracing", 
"doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034a126/12OmNyvY9sc", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a752", "title": "Dynamic-Structured Semantic Propagation Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a752/17D45WnnFXu", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2021/2420/0/242000a536", "title": "Synchronous Dropout for Convolutional Neural Network", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2021/242000a536/1Eb2u3OH408", "parentPublication": { "id": "proceedings/iiai-aai/2021/2420/0", "title": "2021 10th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600k0244", "title": "HINT: Hierarchical Neuron Concept Explainer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600k0244/1H1hYL7OfpS", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aitest/2019/0492/0/049200a089", "title": "Behavior Pattern-Driven Test Case Selection for Deep Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/aitest/2019/049200a089/1aIROMz9QpW", "parentPublication": { "id": "proceedings/aitest/2019/0492/0", "title": "2019 IEEE International Conference On Artificial Intelligence Testing (AITest)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cloud-summit/2020/8266/0/826600a176", "title": "Pace Control via Adaptive Dropout for Federated Training: A Work-in-Progress Report", "doi": null, "abstractUrl": "/proceedings-article/cloud-summit/2020/826600a176/1pA7lpYfUWI", "parentPublication": { "id": "proceedings/cloud-summit/2020/8266/0", "title": "2020 IEEE Cloud Summit", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413021", "title": "Enhancing Semantic Segmentation of Aerial Images with Inhibitory Neurons", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413021/1tmhIy5kCLm", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/saner/2021/9630/0/963000a189", "title": "DeepCon: Contribution Coverage Testing for Deep Learning Systems", "doi": null, "abstractUrl": "/proceedings-article/saner/2021/963000a189/1twfs5KiFd6", "parentPublication": { "id": "proceedings/saner/2021/9630/0", "title": "2021 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552921", "articleId": "1xic8w3ygrm", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552848", "articleId": "1xibZGigO0o", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xibZGigO0o", "doi": "10.1109/TVCG.2021.3114806", "abstract": "Visualizing data in sports videos is gaining traction in sports analytics, given its ability to communicate insights and explicate player strategies engagingly. However, augmenting sports videos with such data visualizations is challenging, especially for sports analysts, as it requires considerable expertise in video editing. To ease the creation process, we present a design space that characterizes augmented sports videos at an element-level <italic>(what the constituents are)</italic> and clip-level <italic>(how those constituents are organized)</italic>. We do so by systematically reviewing 233 examples of augmented sports videos collected from TV channels, teams, and leagues. The design space guides selection of data insights and visualizations for various purposes. Informed by the design space and close collaboration with domain experts, we design VisCommentator, a fast prototyping tool, to eases the creation of augmented table tennis videos by leveraging machine learning-based data extractors and design space-based visualization recommendations. With VisCommentator, sports analysts can create an augmented video by <italic>selecting the data</italic> to visualize instead of manually <italic>drawing the graphical marks</italic>. Our system can be generalized to other racket sports <italic>(e.g</italic>., tennis, badminton) once the underlying datasets and models are available. 
A user study with seven domain experts shows high satisfaction with our system, confirms that the participants can reproduce augmented sports videos in a short period, and provides insightful implications into future improvements and opportunities.", "abstracts": [ { "abstractType": "Regular", "content": "Visualizing data in sports videos is gaining traction in sports analytics, given its ability to communicate insights and explicate player strategies engagingly. However, augmenting sports videos with such data visualizations is challenging, especially for sports analysts, as it requires considerable expertise in video editing. To ease the creation process, we present a design space that characterizes augmented sports videos at an element-level <italic>(what the constituents are)</italic> and clip-level <italic>(how those constituents are organized)</italic>. We do so by systematically reviewing 233 examples of augmented sports videos collected from TV channels, teams, and leagues. The design space guides selection of data insights and visualizations for various purposes. Informed by the design space and close collaboration with domain experts, we design VisCommentator, a fast prototyping tool, to eases the creation of augmented table tennis videos by leveraging machine learning-based data extractors and design space-based visualization recommendations. With VisCommentator, sports analysts can create an augmented video by <italic>selecting the data</italic> to visualize instead of manually <italic>drawing the graphical marks</italic>. Our system can be generalized to other racket sports <italic>(e.g</italic>., tennis, badminton) once the underlying datasets and models are available. 
A user study with seven domain experts shows high satisfaction with our system, confirms that the participants can reproduce augmented sports videos in a short period, and provides insightful implications into future improvements and opportunities.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visualizing data in sports videos is gaining traction in sports analytics, given its ability to communicate insights and explicate player strategies engagingly. However, augmenting sports videos with such data visualizations is challenging, especially for sports analysts, as it requires considerable expertise in video editing. To ease the creation process, we present a design space that characterizes augmented sports videos at an element-level (what the constituents are) and clip-level (how those constituents are organized). We do so by systematically reviewing 233 examples of augmented sports videos collected from TV channels, teams, and leagues. The design space guides selection of data insights and visualizations for various purposes. Informed by the design space and close collaboration with domain experts, we design VisCommentator, a fast prototyping tool, to eases the creation of augmented table tennis videos by leveraging machine learning-based data extractors and design space-based visualization recommendations. With VisCommentator, sports analysts can create an augmented video by selecting the data to visualize instead of manually drawing the graphical marks. Our system can be generalized to other racket sports (e.g., tennis, badminton) once the underlying datasets and models are available. 
A user study with seven domain experts shows high satisfaction with our system, confirms that the participants can reproduce augmented sports videos in a short period, and provides insightful implications into future improvements and opportunities.", "title": "Augmenting Sports Videos with VisCommentator", "normalizedTitle": "Augmenting Sports Videos with VisCommentator", "fno": "09552848", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Learning Artificial Intelligence", "Sport", "Video Signal Processing", "Augmenting Sports Videos", "Sports Analysts", "Augmented Sports Videos", "Augmented Table Tennis Videos", "Design Space Based Visualization Recommendations", "Augmented Video", "Sports", "Data Visualization", "Visualization", "Tools", "Data Mining", "TV", "Data Models", "Augmented Sports Videos", "Video Based Visualization", "Sports Visualization", "Intelligent Design Tool", "Storytelling" ], "authors": [ { "givenName": "Zhutian", "surname": "Chen", "fullName": "Zhutian Chen", "affiliation": "Department of Cognitive Science and Design Lab, State Key Lab of CAD & CG, Zhejiang University and Hong Kong University of Science and Technology, University of California, San Diego, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Shuainan", "surname": "Ye", "fullName": "Shuainan Ye", "affiliation": "State Key Lab of CAD & CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiangtong", "surname": "Chu", "fullName": "Xiangtong Chu", "affiliation": "State Key Lab of CAD & CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Haijun", "surname": "Xia", "fullName": "Haijun Xia", "affiliation": "Department of Cognitive Science and Design Lab, University of California, San Diego, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Hui", "surname": "Zhang", "fullName": "Hui Zhang", "affiliation": "Department of Sport Science, Zhejiang University, China", 
"__typename": "ArticleAuthorType" }, { "givenName": "Huamin", "surname": "Qu", "fullName": "Huamin Qu", "affiliation": "Hong Kong University of Science and Technology, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Yingcai", "surname": "Wu", "fullName": "Yingcai Wu", "affiliation": "State Key Lab of CAD & CG, Zhejiang University, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "824-834", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032e856", "title": "Mutual Enhancement for Detection of Multiple Logos in Sports Videos", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032e856/12OmNAGNCgi", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09906966", "title": "RASIPAM: Interactive Pattern Mining of Multivariate Event Sequences in Racket Sports", "doi": null, "abstractUrl": "/journal/tg/2023/01/09906966/1H5ERCYJa48", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09911988", "title": "Sporthesia: Augmenting Sports Videos Using Natural Language", "doi": null, "abstractUrl": "/journal/tg/2023/01/09911988/1HeiTHl8xxe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2023/01/10035734", "title": "The Ball is in Our Court: Conducting 
Visualization Research With Sports Experts", "doi": null, "abstractUrl": "/magazine/cg/2023/01/10035734/1KrcgLSqCUE", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798054", "title": "Augmented Learning for Sports Using Wearable Head-worn and Wrist-worn Devices", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798054/1cJ17KUNi12", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2019/2607/2/260702a097", "title": "Micro-Level Analysis and Visualization of Tennis Shot Patterns with Fractal Tables", "doi": null, "abstractUrl": "/proceedings-article/compsac/2019/260702a097/1cYipyP6afS", "parentPublication": { "id": "compsac/2019/2607/2", "title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2020/8009/0/800900a036", "title": "Visual Analytics of Multivariate Event Sequence Data in Racquet Sports", "doi": null, "abstractUrl": "/proceedings-article/vast/2020/800900a036/1q7jwkJx00U", "parentPublication": { "id": "proceedings/vast/2020/8009/0", "title": "2020 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sti/2020/4246/0/09350415", "title": "Sports-Net18: Various Sports Classification using Transfer Learning", "doi": null, "abstractUrl": "/proceedings-article/sti/2020/09350415/1rgGtsDBdw4", "parentPublication": { "id": "proceedings/sti/2020/4246/0", "title": "2020 2nd International Conference on Sustainable Technologies for Industry 4.0 (STI)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552436", "title": "TacticFlow: Visual Analytics of Ever-Changing Tactics in Racket Sports", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552436/1xibYczQBfW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552879", "articleId": "1xibY2EaE80", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552436", "articleId": "1xibYczQBfW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBayVqQO7C", "name": "ttg202201-09552848s1-supp1-3114806.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552848s1-supp1-3114806.mp4", "extension": "mp4", "size": "16.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xibYczQBfW", "doi": "10.1109/TVCG.2021.3114832", "abstract": "Event sequence mining is often used to summarize patterns from hundreds of sequences but faces special challenges when handling racket sports data. In racket sports (e.g., tennis and badminton), a player hitting the ball is considered a multivariate event consisting of multiple attributes (e.g., hit technique and ball position). A rally (i.e., a series of consecutive hits beginning with one player serving the ball and ending with one player winning a point) thereby can be viewed as a multivariate event sequence. Mining frequent patterns and depicting how patterns change over time is instructive and meaningful to players who want to learn more short-term competitive strategies (i.e., tactics) that encompass multiple hits. However, players in racket sports usually change their tactics rapidly according to the opponent&#x0027;s reaction, resulting in ever-changing tactic progression. In this work, we introduce a tailored visualization system built on a novel multivariate sequence pattern mining algorithm to facilitate explorative identification and analysis of various tactics and tactic progression. The algorithm can mine multiple non-overlapping multivariate patterns from hundreds of sequences effectively. Based on the mined results, we propose a glyph-based Sankey diagram to visualize the ever-changing tactic progression and support interactive data exploration. Through two case studies with four domain experts in tennis and badminton, we demonstrate that our system can effectively obtain insights about tactic progression in most racket sports. 
We further discuss the strengths and the limitations of our system based on domain experts&#x0027; feedback.", "abstracts": [ { "abstractType": "Regular", "content": "Event sequence mining is often used to summarize patterns from hundreds of sequences but faces special challenges when handling racket sports data. In racket sports (e.g., tennis and badminton), a player hitting the ball is considered a multivariate event consisting of multiple attributes (e.g., hit technique and ball position). A rally (i.e., a series of consecutive hits beginning with one player serving the ball and ending with one player winning a point) thereby can be viewed as a multivariate event sequence. Mining frequent patterns and depicting how patterns change over time is instructive and meaningful to players who want to learn more short-term competitive strategies (i.e., tactics) that encompass multiple hits. However, players in racket sports usually change their tactics rapidly according to the opponent&#x0027;s reaction, resulting in ever-changing tactic progression. In this work, we introduce a tailored visualization system built on a novel multivariate sequence pattern mining algorithm to facilitate explorative identification and analysis of various tactics and tactic progression. The algorithm can mine multiple non-overlapping multivariate patterns from hundreds of sequences effectively. Based on the mined results, we propose a glyph-based Sankey diagram to visualize the ever-changing tactic progression and support interactive data exploration. Through two case studies with four domain experts in tennis and badminton, we demonstrate that our system can effectively obtain insights about tactic progression in most racket sports. 
We further discuss the strengths and the limitations of our system based on domain experts&#x0027; feedback.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Event sequence mining is often used to summarize patterns from hundreds of sequences but faces special challenges when handling racket sports data. In racket sports (e.g., tennis and badminton), a player hitting the ball is considered a multivariate event consisting of multiple attributes (e.g., hit technique and ball position). A rally (i.e., a series of consecutive hits beginning with one player serving the ball and ending with one player winning a point) thereby can be viewed as a multivariate event sequence. Mining frequent patterns and depicting how patterns change over time is instructive and meaningful to players who want to learn more short-term competitive strategies (i.e., tactics) that encompass multiple hits. However, players in racket sports usually change their tactics rapidly according to the opponent's reaction, resulting in ever-changing tactic progression. In this work, we introduce a tailored visualization system built on a novel multivariate sequence pattern mining algorithm to facilitate explorative identification and analysis of various tactics and tactic progression. The algorithm can mine multiple non-overlapping multivariate patterns from hundreds of sequences effectively. Based on the mined results, we propose a glyph-based Sankey diagram to visualize the ever-changing tactic progression and support interactive data exploration. Through two case studies with four domain experts in tennis and badminton, we demonstrate that our system can effectively obtain insights about tactic progression in most racket sports. 
We further discuss the strengths and the limitations of our system based on domain experts' feedback.", "title": "TacticFlow: Visual Analytics of Ever-Changing Tactics in Racket Sports", "normalizedTitle": "TacticFlow: Visual Analytics of Ever-Changing Tactics in Racket Sports", "fno": "09552436", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Mining", "Data Visualisation", "Sport", "Tactics", "Event Sequence Mining", "Racket Sports Data", "Tennis", "Badminton", "Hit Technique", "Consecutive Hits", "Multivariate Event Sequence", "Mining Frequent Patterns", "Multiple Hits", "Tactic Progression", "Novel Multivariate Sequence Pattern Mining Algorithm", "Nonoverlapping Multivariate Patterns", "Mined Results", "Sports", "Data Visualization", "Visual Analytics", "Data Models", "Usability", "Task Analysis", "Pipelines", "Sports Analytics", "Multivariate Event Sequence", "Sequential Pattern Mining", "Progression Analysis" ], "authors": [ { "givenName": "Jiang", "surname": "Wu", "fullName": "Jiang Wu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Dongyu", "surname": "Liu", "fullName": "Dongyu Liu", "affiliation": "Massachusetts Institute of Technology, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Ziyang", "surname": "Guo", "fullName": "Ziyang Guo", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Qingyang", "surname": "Xu", "fullName": "Qingyang Xu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yingcai", "surname": "Wu", "fullName": "Yingcai Wu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": 
"trans", "pages": "835-845", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismse/2004/2217/0/22170186", "title": "Detecting Tactics Patterns for Archiving Tennis Video Clips", "doi": null, "abstractUrl": "/proceedings-article/ismse/2004/22170186/12OmNBtCCBd", "parentPublication": { "id": "proceedings/ismse/2004/2217/0", "title": "Multimedia Software Engineering, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000b830", "title": "Soccer: Who Has the Ball? Generating Visual Analytics and Player Statistics", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000b830/17D45VObpOM", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdva/2018/9194/0/08534022", "title": "Revealing the Invisible: Visual Analytics and Explanatory Storytelling for Advanced Team Sport Analysis", "doi": null, "abstractUrl": "/proceedings-article/bdva/2018/08534022/17D45WODasQ", "parentPublication": { "id": "proceedings/bdva/2018/9194/0", "title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09903338", "title": "The Quest for : Embedded Visualization for Augmenting Basketball Game Viewing Experiences<inline-graphic xlink:href=\"tvcg-lin-3209353-graphic-1-source.tif\"/><bold/>", "doi": null, "abstractUrl": "/journal/tg/2023/01/09903338/1GZojZ9otHO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2023/01/09906966", "title": "RASIPAM: Interactive Pattern Mining of Multivariate Event Sequences in Racket Sports", "doi": null, "abstractUrl": "/journal/tg/2023/01/09906966/1H5ERCYJa48", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09906970", "title": "Tac-Trainer: A Visual Analytics System for IoT-based Racket Sports Training", "doi": null, "abstractUrl": "/journal/tg/2023/01/09906970/1H5EW63diWA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09911988", "title": "Sporthesia: Augmenting Sports Videos Using Natural Language", "doi": null, "abstractUrl": "/journal/tg/2023/01/09911988/1HeiTHl8xxe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a566", "title": "Virtual Reality Racket Sports: Virtual Drills for Exercise and Training", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a566/1pysv1cgLPa", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2020/8009/0/800900a036", "title": "Visual Analytics of Multivariate Event Sequence Data in Racquet Sports", "doi": null, "abstractUrl": "/proceedings-article/vast/2020/800900a036/1q7jwkJx00U", "parentPublication": { "id": "proceedings/vast/2020/8009/0", "title": "2020 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552848", "title": "Augmenting Sports Videos with VisCommentator", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552848/1xibZGigO0o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552848", "articleId": "1xibZGigO0o", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552250", "articleId": "1xic6GuRQ76", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xic6GuRQ76", "doi": "10.1109/TVCG.2021.3114781", "abstract": "Video moderation, which refers to remove deviant or explicit content from e-commerce livestreams, has become prevalent owing to social and engaging features. However, this task is tedious and time consuming due to the difficulties associated with watching and reviewing multimodal video content, including video frames and audio clips. To ensure effective video moderation, we propose VideoModerator, a risk-aware framework that seamlessly integrates human knowledge with machine insights. This framework incorporates a set of advanced machine learning models to extract the risk-aware features from multimodal video content and discover potentially deviant videos. Moreover, this framework introduces an interactive visualization interface with three views, namely, a video view, a frame view, and an audio view. In the video view, we adopt a segmented timeline and highlight high-risk periods that may contain deviant information. In the frame view, we present a novel visual summarization method that combines risk-aware features and video context to enable quick video navigation. In the audio view, we employ a storyline-based design to provide a multi-faceted overview which can be used to explore audio content. Furthermore, we report the usage of VideoModerator through a case scenario and conduct experiments and a controlled user study to validate its effectiveness.", "abstracts": [ { "abstractType": "Regular", "content": "Video moderation, which refers to remove deviant or explicit content from e-commerce livestreams, has become prevalent owing to social and engaging features. 
However, this task is tedious and time consuming due to the difficulties associated with watching and reviewing multimodal video content, including video frames and audio clips. To ensure effective video moderation, we propose VideoModerator, a risk-aware framework that seamlessly integrates human knowledge with machine insights. This framework incorporates a set of advanced machine learning models to extract the risk-aware features from multimodal video content and discover potentially deviant videos. Moreover, this framework introduces an interactive visualization interface with three views, namely, a video view, a frame view, and an audio view. In the video view, we adopt a segmented timeline and highlight high-risk periods that may contain deviant information. In the frame view, we present a novel visual summarization method that combines risk-aware features and video context to enable quick video navigation. In the audio view, we employ a storyline-based design to provide a multi-faceted overview which can be used to explore audio content. Furthermore, we report the usage of VideoModerator through a case scenario and conduct experiments and a controlled user study to validate its effectiveness.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Video moderation, which refers to remove deviant or explicit content from e-commerce livestreams, has become prevalent owing to social and engaging features. However, this task is tedious and time consuming due to the difficulties associated with watching and reviewing multimodal video content, including video frames and audio clips. To ensure effective video moderation, we propose VideoModerator, a risk-aware framework that seamlessly integrates human knowledge with machine insights. This framework incorporates a set of advanced machine learning models to extract the risk-aware features from multimodal video content and discover potentially deviant videos. 
Moreover, this framework introduces an interactive visualization interface with three views, namely, a video view, a frame view, and an audio view. In the video view, we adopt a segmented timeline and highlight high-risk periods that may contain deviant information. In the frame view, we present a novel visual summarization method that combines risk-aware features and video context to enable quick video navigation. In the audio view, we employ a storyline-based design to provide a multi-faceted overview which can be used to explore audio content. Furthermore, we report the usage of VideoModerator through a case scenario and conduct experiments and a controlled user study to validate its effectiveness.", "title": "VideoModerator: A Risk-aware Framework for Multimodal Video Moderation in E-Commerce", "normalizedTitle": "VideoModerator: A Risk-aware Framework for Multimodal Video Moderation in E-Commerce", "fno": "09552250", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Task Analysis", "Visual Analytics", "Machine Learning", "Motion Pictures", "Feature Extraction", "Data Mining", "Video Moderation", "Video Visualization", "E Commerce Livestreaming" ], "authors": [ { "givenName": "Tan", "surname": "Tang", "fullName": "Tan Tang", "affiliation": "State Key Lab of CAD&CG, Zhejiang University and Zhejiang Lab, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yanhong", "surname": "Wu", "fullName": "Yanhong Wu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University and Zhejiang Lab, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yingcai", "surname": "Wu", "fullName": "Yingcai Wu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University and Zhejiang Lab, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lingyun", "surname": "Yu", "fullName": "Lingyun Yu", "affiliation": "Department of Computing, Xi'an Jiaotong-Liverpool University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yuhong", 
"surname": "Li", "fullName": "Yuhong Li", "affiliation": "Alibaba Group, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "846-856", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ism/2009/3890/0/3890a024", "title": "A Low-Complexity Dynamic Face-Voice Feature Fusion Approach to Multimodal Person Recognition", "doi": null, "abstractUrl": "/proceedings-article/ism/2009/3890a024/12OmNBqMDvr", "parentPublication": { "id": "proceedings/ism/2009/3890/0", "title": "2009 11th IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2004/8603/2/01394402", "title": "Video skimming based on story units via general tempo analysis", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394402/12OmNCf1Dh0", "parentPublication": { "id": "proceedings/icme/2004/8603/2", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icccnt/2012/9999/0/06395919", "title": "Audio-video based segmentation and classification using SVM", "doi": null, "abstractUrl": "/proceedings-article/icccnt/2012/06395919/12OmNvStcx4", "parentPublication": { "id": "proceedings/icccnt/2012/9999/0", "title": "2012 Third International Conference on Computing, Communication and Networking Technologies (ICCCNT 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06012080", "title": "Example-based video remixing for home videos", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06012080/12OmNyrqznD", "parentPublication": { "id": 
"proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mmcs/1997/7819/0/00609596", "title": "Enhanced video handling based on audio analysis", "doi": null, "abstractUrl": "/proceedings-article/mmcs/1997/00609596/12OmNzgeLB8", "parentPublication": { "id": "proceedings/mmcs/1997/7819/0", "title": "Proceedings of IEEE International Conference on Multimedia Computing and Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2019/05/08387512", "title": "Read, Watch, Listen, and Summarize: Multi-Modal Summarization for Asynchronous Text, Image, Audio and Video", "doi": null, "abstractUrl": "/journal/tk/2019/05/08387512/18TXO88ZqJq", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccict/2022/7224/0/722400a590", "title": "Computational intelligence paradigms for audio-based video summarization", "doi": null, "abstractUrl": "/proceedings-article/ccict/2022/722400a590/1HpDZv1Ub6M", "parentPublication": { "id": "proceedings/ccict/2022/7224/0", "title": "2022 Fifth International Conference on Computational Intelligence and Communication Technologies (CCICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300b618", "title": "Multimodal Deep Models for Predicting Affective Responses Evoked by Movies", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300b618/1i5mw7BVj8s", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09557224", "title": "A 
Visualization Approach for Monitoring Order Processing in E-Commerce Warehouse", "doi": null, "abstractUrl": "/journal/tg/2022/01/09557224/1xlw0UMxoaY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2021/3335/0/333500a191", "title": "Conceptualizing Visual Analytic Interventions for Content Moderation", "doi": null, "abstractUrl": "/proceedings-article/vis/2021/333500a191/1yXuegZiicg", "parentPublication": { "id": "proceedings/vis/2021/3335/0", "title": "2021 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552436", "articleId": "1xibYczQBfW", "__typename": "AdjacentArticleType" }, "next": { "fno": "09557224", "articleId": "1xlw0UMxoaY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaUW96BSU", "name": "ttg202201-09552250s1-supp1-3114781.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552250s1-supp1-3114781.mp4", "extension": "mp4", "size": "40.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xlw0UMxoaY", "doi": "10.1109/TVCG.2021.3114878", "abstract": "The efficiency of warehouses is vital to e-commerce. Fast order processing at the warehouses ensures timely deliveries and improves customer satisfaction. However, monitoring, analyzing, and manipulating order processing in the warehouses in real time are challenging for traditional methods due to the sheer volume of incoming orders, the fuzzy definition of delayed order patterns, and the complex decision-making of order handling priorities. In this paper, we adopt a data-driven approach and propose OrderMonitor, a visual analytics system that assists warehouse managers in analyzing and improving order processing efficiency in real time based on streaming warehouse event data. Specifically, the order processing pipeline is visualized with a novel pipeline design based on the sedimentation metaphor to facilitate real-time order monitoring and suggest potentially abnormal orders. We also design a novel visualization that depicts order timelines based on the Gantt charts and Marey&#x0027;s graphs. Such a visualization helps the managers gain insights into the performance of order processing and find major blockers for delayed orders. Furthermore, an evaluating view is provided to assist users in inspecting order details and assigning priorities to improve the processing performance. The effectiveness of OrderMonitor is evaluated with two case studies on a real-world warehouse dataset.", "abstracts": [ { "abstractType": "Regular", "content": "The efficiency of warehouses is vital to e-commerce. Fast order processing at the warehouses ensures timely deliveries and improves customer satisfaction. 
However, monitoring, analyzing, and manipulating order processing in the warehouses in real time are challenging for traditional methods due to the sheer volume of incoming orders, the fuzzy definition of delayed order patterns, and the complex decision-making of order handling priorities. In this paper, we adopt a data-driven approach and propose OrderMonitor, a visual analytics system that assists warehouse managers in analyzing and improving order processing efficiency in real time based on streaming warehouse event data. Specifically, the order processing pipeline is visualized with a novel pipeline design based on the sedimentation metaphor to facilitate real-time order monitoring and suggest potentially abnormal orders. We also design a novel visualization that depicts order timelines based on the Gantt charts and Marey&#x0027;s graphs. Such a visualization helps the managers gain insights into the performance of order processing and find major blockers for delayed orders. Furthermore, an evaluating view is provided to assist users in inspecting order details and assigning priorities to improve the processing performance. The effectiveness of OrderMonitor is evaluated with two case studies on a real-world warehouse dataset.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The efficiency of warehouses is vital to e-commerce. Fast order processing at the warehouses ensures timely deliveries and improves customer satisfaction. However, monitoring, analyzing, and manipulating order processing in the warehouses in real time are challenging for traditional methods due to the sheer volume of incoming orders, the fuzzy definition of delayed order patterns, and the complex decision-making of order handling priorities. 
In this paper, we adopt a data-driven approach and propose OrderMonitor, a visual analytics system that assists warehouse managers in analyzing and improving order processing efficiency in real time based on streaming warehouse event data. Specifically, the order processing pipeline is visualized with a novel pipeline design based on the sedimentation metaphor to facilitate real-time order monitoring and suggest potentially abnormal orders. We also design a novel visualization that depicts order timelines based on the Gantt charts and Marey's graphs. Such a visualization helps the managers gain insights into the performance of order processing and find major blockers for delayed orders. Furthermore, an evaluating view is provided to assist users in inspecting order details and assigning priorities to improve the processing performance. The effectiveness of OrderMonitor is evaluated with two case studies on a real-world warehouse dataset.", "title": "A Visualization Approach for Monitoring Order Processing in E-Commerce Warehouse", "normalizedTitle": "A Visualization Approach for Monitoring Order Processing in E-Commerce Warehouse", "fno": "09557224", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Monitoring", "Real Time Systems", "Schedules", "Delays", "Warehousing", "Visual Analytics", "Streaming Data", "Time Series Data", "E Commerce Warehouse", "Order Processing" ], "authors": [ { "givenName": "Junxiu", "surname": "Tang", "fullName": "Junxiu Tang", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yuhua", "surname": "Zhou", "fullName": "Yuhua Zhou", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Tan", "surname": "Tang", "fullName": "Tan Tang", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": 
"Di", "surname": "Weng", "fullName": "Di Weng", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Boyang", "surname": "Xie", "fullName": "Boyang Xie", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lingyun", "surname": "Yu", "fullName": "Lingyun Yu", "affiliation": "Department of Computing, Xi’ an Jiaotong-Liverpool University, Suzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Huaqiang", "surname": "Zhang", "fullName": "Huaqiang Zhang", "affiliation": "Alibaba Group, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yingcai", "surname": "Wu", "fullName": "Yingcai Wu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "857-867", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/computationworld/2009/3862/0/3862a072", "title": "An Agent-Based Adaptive Join Algorithm for Distributed Data Warehousing", "doi": null, "abstractUrl": "/proceedings-article/computationworld/2009/3862a072/12OmNviHKnw", "parentPublication": { "id": "proceedings/computationworld/2009/3862/0", "title": "Future Computing, Service Computation, Cognitive, Adaptive, Content, Patterns, Computation World", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2003/1874/8/187480232a", "title": "Ad-Hoc Association-Rule Mining within the Data Warehouse", "doi": null, "abstractUrl": "/proceedings-article/hicss/2003/187480232a/12OmNvpNIrw", "parentPublication": { "id": 
"proceedings/hicss/2003/1874/8", "title": "36th Annual Hawaii International Conference on System Sciences, 2003. Proceedings of the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tools/1999/0393/0/03930400", "title": "A Composite Data Model in Object-Oriented Data Warehousing", "doi": null, "abstractUrl": "/proceedings-article/tools/1999/03930400/12OmNxvwoYY", "parentPublication": { "id": "proceedings/tools/1999/0393/0", "title": "Proceedings Technology of Object-Oriented Languages and Systems. TOOLS 31", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2012/4747/0/4747b382", "title": "The Credit Suisse Meta-data Warehouse", "doi": null, "abstractUrl": "/proceedings-article/icde/2012/4747b382/12OmNyRxFqi", "parentPublication": { "id": "proceedings/icde/2012/4747/0", "title": "2012 IEEE 28th International Conference on Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ghtc/2011/4595/0/4595a442", "title": "The Need for Warehouse Information in a Disaster Recovery Communication System", "doi": null, "abstractUrl": "/proceedings-article/ghtc/2011/4595a442/12OmNz6iOeX", "parentPublication": { "id": "proceedings/ghtc/2011/4595/0", "title": "IEEE Global Humanitarian Technology Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ideas/1997/8114/0/81140151", "title": "A uniform approach for selecting views and indexes in a data warehouse", "doi": null, "abstractUrl": "/proceedings-article/ideas/1997/81140151/12OmNzUPplj", "parentPublication": { "id": "proceedings/ideas/1997/8114/0", "title": "Database Engineering and Applications Symposium, International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icppw/2002/1680/0/16800383", "title": "Parallel Generation of Base Relation 
Snapshots for Materialized View Maintenance in Data Warehouse Environment", "doi": null, "abstractUrl": "/proceedings-article/icppw/2002/16800383/12OmNzWfp1s", "parentPublication": { "id": "proceedings/icppw/2002/1680/0", "title": "Proceedings. International Conference on Parallel Processing Workshop", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2007/2755/0/04076817", "title": "Architecting a Dimensional Document Warehouse", "doi": null, "abstractUrl": "/proceedings-article/hicss/2007/04076817/17D45Wc1IJc", "parentPublication": { "id": "proceedings/hicss/2007/2755/0", "title": "2007 40th Annual Hawaii International Conference on System Sciences (HICSS'07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icemme/2019/5588/0/558800a609", "title": "Research on Performance Evaluation of Warehouse Operators in E-Commerce Enterprises", "doi": null, "abstractUrl": "/proceedings-article/icemme/2019/558800a609/1hrLlnPWwhy", "parentPublication": { "id": "proceedings/icemme/2019/5588/0", "title": "2019 International Conference on Economic Management and Model Engineering (ICEMME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitm/2021/3585/0/358500a089", "title": "Implementation of Lean and Logistics Principles to Reduce Non-conformities of a Warehouse in the Metalworking Industry", "doi": null, "abstractUrl": "/proceedings-article/icitm/2021/358500a089/1uOvNINv5PW", "parentPublication": { "id": "proceedings/icitm/2021/3585/0", "title": "2021 10th International Conference on Industrial Technology and Management (ICITM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552250", "articleId": "1xic6GuRQ76", "__typename": "AdjacentArticleType" }, "next": { "fno": "09555925", "articleId": "1xlw1EdMc9i", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaJphLw3e", "name": "ttg202201-09557224s1-supp1-3114878.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09557224s1-supp1-3114878.mp4", "extension": "mp4", "size": "36 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xlw1EdMc9i", "doi": "10.1109/TVCG.2021.3114821", "abstract": "We present a visual analytics tool, MiningVis, to explore the long-term historical evolution and dynamics of the Bitcoin mining ecosystem. Bitcoin is a cryptocurrency that attracts much attention but remains difficult to understand. Particularly important to the success, stability, and security of Bitcoin is a component of the system called “mining.” Miners are responsible for validating transactions and are incentivized to participate by the promise of a monetary reward. Mining pools have emerged as collectives of miners that ensure a more stable and predictable income. MiningVis aims to help analysts understand the evolution and dynamics of the Bitcoin mining ecosystem, including mining market statistics, multi-measure mining pool rankings, and pool hopping behavior. Each of these features can be compared to external data concerning pool characteristics and Bitcoin news. In order to assess the value of MiningVis, we conducted online interviews and insight-based user studies with Bitcoin miners. We describe research questions tackled and insights made by our participants and illustrate practical implications for visual analytics systems for Bitcoin mining.", "abstracts": [ { "abstractType": "Regular", "content": "We present a visual analytics tool, MiningVis, to explore the long-term historical evolution and dynamics of the Bitcoin mining ecosystem. Bitcoin is a cryptocurrency that attracts much attention but remains difficult to understand. 
Particularly important to the success, stability, and security of Bitcoin is a component of the system called “mining.” Miners are responsible for validating transactions and are incentivized to participate by the promise of a monetary reward. Mining pools have emerged as collectives of miners that ensure a more stable and predictable income. MiningVis aims to help analysts understand the evolution and dynamics of the Bitcoin mining ecosystem, including mining market statistics, multi-measure mining pool rankings, and pool hopping behavior. Each of these features can be compared to external data concerning pool characteristics and Bitcoin news. In order to assess the value of MiningVis, we conducted online interviews and insight-based user studies with Bitcoin miners. We describe research questions tackled and insights made by our participants and illustrate practical implications for visual analytics systems for Bitcoin mining.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a visual analytics tool, MiningVis, to explore the long-term historical evolution and dynamics of the Bitcoin mining ecosystem. Bitcoin is a cryptocurrency that attracts much attention but remains difficult to understand. Particularly important to the success, stability, and security of Bitcoin is a component of the system called “mining.” Miners are responsible for validating transactions and are incentivized to participate by the promise of a monetary reward. Mining pools have emerged as collectives of miners that ensure a more stable and predictable income. MiningVis aims to help analysts understand the evolution and dynamics of the Bitcoin mining ecosystem, including mining market statistics, multi-measure mining pool rankings, and pool hopping behavior. Each of these features can be compared to external data concerning pool characteristics and Bitcoin news. 
In order to assess the value of MiningVis, we conducted online interviews and insight-based user studies with Bitcoin miners. We describe research questions tackled and insights made by our participants and illustrate practical implications for visual analytics systems for Bitcoin mining.", "title": "MiningVis: Visual Analytics of the Bitcoin Mining Economy", "normalizedTitle": "MiningVis: Visual Analytics of the Bitcoin Mining Economy", "fno": "09555925", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Bitcoin", "Tools", "Economics", "Data Mining", "Visual Analytics", "Hardware", "Security", "Visual Analytics", "Bitcoin", "Bitcoin Mining", "Mining Pools", "Pool Hopping" ], "authors": [ { "givenName": "Natkamon", "surname": "Tovanich", "fullName": "Natkamon Tovanich", "affiliation": "IRT SystemX, Palaiseau, France", "__typename": "ArticleAuthorType" }, { "givenName": "Nicolas", "surname": "Soulié", "fullName": "Nicolas Soulié", "affiliation": "Université Paris-Saclay, Univ Evry, IMT-BS, LITEM, Evry-Courcouronnes, France", "__typename": "ArticleAuthorType" }, { "givenName": "Nicolas", "surname": "Heulot", "fullName": "Nicolas Heulot", "affiliation": "IRT SystemX, Palaiseau, France", "__typename": "ArticleAuthorType" }, { "givenName": "Petra", "surname": "Isenberg", "fullName": "Petra Isenberg", "affiliation": "Université Paris-Saclay, CNRS, Inria, LISN, Orsay, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "868-878", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icissp/2015/135/0/07509935", "title": "On detection of bitcoin mining redirection attacks", "doi": null, "abstractUrl": "/proceedings-article/icissp/2015/07509935/12OmNBZpHao", "parentPublication": { "id": 
"proceedings/icissp/2015/135/0", "title": "2015 International Conference on Information Systems Security and Privacy (ICISSP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2018/5500/0/550000a582", "title": "Toward Socially Optimal Bitcoin Mining", "doi": null, "abstractUrl": "/proceedings-article/icisce/2018/550000a582/17D45VsBU1E", "parentPublication": { "id": "proceedings/icisce/2018/5500/0", "title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvcbt/2019/3669/0/366900a043", "title": "Anti-Withholding Reward System to Secure Blockchain Mining Pools", "doi": null, "abstractUrl": "/proceedings-article/cvcbt/2019/366900a043/1cdOwNWzCgw", "parentPublication": { "id": "proceedings/cvcbt/2019/3669/0", "title": "2019 Crypto Valley Conference on Blockchain Technology (CVCBT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sp/2019/6660/0/666000a935", "title": "Bitcoin vs. 
Bitcoin Cash: Coexistence or Downfall of Bitcoin Cash?", "doi": null, "abstractUrl": "/proceedings-article/sp/2019/666000a935/1dlwm7PtmjC", "parentPublication": { "id": "proceedings/sp/2019/6660/0", "title": "2019 IEEE Symposium on Security and Privacy (SP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a687", "title": "Multi-Miner's Cooperative Evolution Method of Bitcoin Pool Based on Temporal Difference Leaning Method", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a687/1ehBzxYsco0", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0", "title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/blockchain/2019/4693/0/469300a107", "title": "Bitcoin Mining with Transaction Fees: A Game on the Block Size", "doi": null, "abstractUrl": "/proceedings-article/blockchain/2019/469300a107/1gjS53xOkOQ", "parentPublication": { "id": "proceedings/blockchain/2019/4693/0", "title": "2019 IEEE International Conference on Blockchain (Blockchain)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/nt/2020/02/09036064", "title": "Survive and Thrive: A Stochastic Game for DDoS Attacks in Bitcoin Mining Pools", "doi": null, "abstractUrl": "/journal/nt/2020/02/09036064/1iaeoa9JZWo", "parentPublication": { "id": "trans/nt", "title": "IEEE/ACM Transactions on Networking", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigcom/2020/8275/0/09160462", "title": "Measurement and Analysis of the Bitcoin Networks: A View from Mining Pools", "doi": null, 
"abstractUrl": "/proceedings-article/bigcom/2020/09160462/1m4CJmCFAVa", "parentPublication": { "id": "proceedings/bigcom/2020/8275/0", "title": "2020 6th International Conference on Big Data Computing and Communications (BIGCOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/blockchain/2020/0495/0/049500a036", "title": "Analysing the Benefit of Selfish Mining with Multiple Players", "doi": null, "abstractUrl": "/proceedings-article/blockchain/2020/049500a036/1pttQLCnQAM", "parentPublication": { "id": "proceedings/blockchain/2020/0495/0", "title": "2020 IEEE International Conference on Blockchain (Blockchain)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/srds/2021/3819/0/381900a109", "title": "Characterizing the Impact of Network Delay on Bitcoin Mining", "doi": null, "abstractUrl": "/proceedings-article/srds/2021/381900a109/1yJZd71wVhe", "parentPublication": { "id": "proceedings/srds/2021/3819/0", "title": "2021 40th International Symposium on Reliable Distributed Systems (SRDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09557224", "articleId": "1xlw0UMxoaY", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552886", "articleId": "1xic6y40Iwg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaXn4SCZO", "name": "ttg202201-09555925s1-supp1-3114821.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09555925s1-supp1-3114821.mp4", "extension": "mp4", "size": "21.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xic6y40Iwg", "doi": "10.1109/TVCG.2021.3114800", "abstract": "Breaking news and first-hand reports often trend on social media platforms before traditional news outlets cover them. The real-time analysis of posts on such platforms can reveal valuable and timely insights for journalists, politicians, business analysts, and first responders, but the high number and diversity of new posts pose a challenge. In this work, we present an interactive system that enables the visual analysis of streaming social media data on a large scale in real-time. We propose an efficient and explainable dynamic clustering algorithm that powers a continuously updated visualization of the current thematic landscape as well as detailed visual summaries of specific topics of interest. Our parallel clustering strategy provides an adaptive stream with a digestible but diverse selection of recent posts related to relevant topics. We also integrate familiar visual metaphors that are highly interlinked for enabling both explorative and more focused monitoring tasks. Analysts can gradually increase the resolution to dive deeper into particular topics. In contrast to previous work, our system also works with non-geolocated posts and avoids extensive preprocessing such as detecting events. We evaluated our dynamic clustering algorithm and discuss several use cases that show the utility of our system.", "abstracts": [ { "abstractType": "Regular", "content": "Breaking news and first-hand reports often trend on social media platforms before traditional news outlets cover them. 
The real-time analysis of posts on such platforms can reveal valuable and timely insights for journalists, politicians, business analysts, and first responders, but the high number and diversity of new posts pose a challenge. In this work, we present an interactive system that enables the visual analysis of streaming social media data on a large scale in real-time. We propose an efficient and explainable dynamic clustering algorithm that powers a continuously updated visualization of the current thematic landscape as well as detailed visual summaries of specific topics of interest. Our parallel clustering strategy provides an adaptive stream with a digestible but diverse selection of recent posts related to relevant topics. We also integrate familiar visual metaphors that are highly interlinked for enabling both explorative and more focused monitoring tasks. Analysts can gradually increase the resolution to dive deeper into particular topics. In contrast to previous work, our system also works with non-geolocated posts and avoids extensive preprocessing such as detecting events. We evaluated our dynamic clustering algorithm and discuss several use cases that show the utility of our system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Breaking news and first-hand reports often trend on social media platforms before traditional news outlets cover them. The real-time analysis of posts on such platforms can reveal valuable and timely insights for journalists, politicians, business analysts, and first responders, but the high number and diversity of new posts pose a challenge. In this work, we present an interactive system that enables the visual analysis of streaming social media data on a large scale in real-time. We propose an efficient and explainable dynamic clustering algorithm that powers a continuously updated visualization of the current thematic landscape as well as detailed visual summaries of specific topics of interest. 
Our parallel clustering strategy provides an adaptive stream with a digestible but diverse selection of recent posts related to relevant topics. We also integrate familiar visual metaphors that are highly interlinked for enabling both explorative and more focused monitoring tasks. Analysts can gradually increase the resolution to dive deeper into particular topics. In contrast to previous work, our system also works with non-geolocated posts and avoids extensive preprocessing such as detecting events. We evaluated our dynamic clustering algorithm and discuss several use cases that show the utility of our system.", "title": "Real-Time Visual Analysis of High-Volume Social Media Posts", "normalizedTitle": "Real-Time Visual Analysis of High-Volume Social Media Posts", "fno": "09552886", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Social Networking Online", "Heuristic Algorithms", "Data Visualization", "Clustering Algorithms", "Real Time Systems", "Text Analysis", "Visual Analytics", "Social Media Analysis", "Dynamic Clustering", "Streaming Data" ], "authors": [ { "givenName": "Johannes", "surname": "Knittel", "fullName": "Johannes Knittel", "affiliation": "University of Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Steffen", "surname": "Koch", "fullName": "Steffen Koch", "affiliation": "University of Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Tan", "surname": "Tang", "fullName": "Tan Tang", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Wei", "surname": "Chen", "fullName": "Wei Chen", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yingcai", "surname": "Wu", "fullName": "Yingcai Wu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Shixia", "surname": "Liu", "fullName": "Shixia Liu", 
"affiliation": "Tsinghua University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Thomas", "surname": "Ertl", "fullName": "Thomas Ertl", "affiliation": "University of Stuttgart, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "879-889", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdcs/2017/1792/0/1792c497", "title": "Pythia: A System for Online Topic Discovery of Social Media Posts", "doi": null, "abstractUrl": "/proceedings-article/icdcs/2017/1792c497/12OmNqMPfQT", "parentPublication": { "id": "proceedings/icdcs/2017/1792/0", "title": "2017 IEEE 37th International Conference on Distributed Computing Systems (ICDCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06875992", "title": "EvoRiver: Visual Analysis of Topic Coopetition on Social Media", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875992/13rRUxBa563", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/07/mco2013070030", "title": "Visual Analytics Support for Intelligence Analysis", "doi": null, "abstractUrl": "/magazine/co/2013/07/mco2013070030/13rRUxD9h0P", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06876032", "title": "OpinionFlow: Visual Analysis of Opinion Diffusion on Social Media", "doi": null, "abstractUrl": "/journal/tg/2014/12/06876032/13rRUxYINfe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer 
Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06876013", "title": "#FluxFlow: Visual Analysis of Anomalous Information Spreading on Social Media", "doi": null, "abstractUrl": "/journal/tg/2014/12/06876013/13rRUy0qnGn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122012", "title": "Visual Analysis of Topic Competition on Social Media", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122012/13rRUyogGAa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2017/3066/0/08276881", "title": "Event Detection and Key Posts Discovering in Social Media Data Streams", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2017/08276881/17D45VsBU20", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2017/3066/0", "title": "2017 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdva/2018/9194/0/08534023", "title": "SocialOcean: Visual Analysis and Characterization of Social Media Bubbles", "doi": null, "abstractUrl": "/proceedings-article/bdva/2018/08534023/17D45WIXbOL", "parentPublication": { "id": "proceedings/bdva/2018/9194/0", "title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icsc/2019/6783/0/08665516", "title": "MVP: Finding the Most Valuable Posts in Financial Social Networks", "doi": null, "abstractUrl": "/proceedings-article/icsc/2019/08665516/18qcefq3VwQ", "parentPublication": { "id": "proceedings/icsc/2019/6783/0", "title": "2019 IEEE 13th International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2022/4609/0/460900b168", "title": "Disentangling the Information Flood on OSNs: Finding Notable Posts and Topics", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2022/460900b168/1KBqYUQ5xqo", "parentPublication": { "id": "proceedings/icdmw/2022/4609/0", "title": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09555925", "articleId": "1xlw1EdMc9i", "__typename": "AdjacentArticleType" }, "next": { "fno": "09552923", "articleId": "1xic1nONd3q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaUyShxIY", "name": "ttg202201-09552886s1-supp1-3114800.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552886s1-supp1-3114800.mp4", "extension": "mp4", "size": "15 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwfZBVr", "doi": "10.1109/TVCG.2017.2780598", "abstract": null, "abstracts": [], "normalizedAbstract": null, "title": "State of the Journal", "normalizedTitle": "State of the Journal", "fno": "08241922", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Leila De", "surname": "Floriani", "fullName": "Leila De Floriani", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1036-1037", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2014/01/ttg2014010001", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tg/2014/01/ttg2014010001/13rRUEgarsI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2018/01/08173510", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/td/2018/01/08173510/13rRUIJuxv3", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2014/01/ttd2014010001", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/td/2014/01/ttd2014010001/13rRUwd9CFL", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2018/01/08176070", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tc/2018/01/08176070/13rRUxBa55x", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2012/02/ttc2012020145", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tc/2012/02/ttc2012020145/13rRUxcbnBM", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2007/01/n0001", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tb/2007/01/n0001/13rRUy3gnbJ", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2017/01/07779242", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tc/2017/01/07779242/13rRUygT7mj", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2016/01/07350367", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tc/2016/01/07350367/13rRUyuNsET", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/03/08974588", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tg/2020/03/08974588/1gZh3n61QTC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2020/04/09032252", 
"title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tc/2020/04/09032252/1i6VsXPOgJq", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "07833028", "articleId": "13rRUwInvsX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwInvsX", "doi": "10.1109/TVCG.2017.2658570", "abstract": "Virtual Reality (VR) Head-Mounted Displays (HMDs) are on the verge of becoming commodity hardware available to the average user and feasible to use as a tool for 3D work. Some HMDs include front-facing cameras, enabling Augmented Reality (AR) functionality. Apart from avoiding collisions with the environment, interaction with virtual objects may also be affected by seeing the real environment. However, whether these effects are positive or negative has not yet been studied extensively. For most tasks it is unknown whether AR has any advantage over VR. In this work we present the results of a user study in which we compared user performance measured in task completion time on a 9 degrees of freedom object selection and transformation task performed either in AR or VR, both with a 3D input device and a mouse. Our results show faster task completion time in AR over VR. When using a 3D input device, a purely VR environment increased task completion time by 22.5 percent on average compared to AR ( Z_${p}<0.024$_Z ). Surprisingly, a similar effect occurred when using a mouse: users were about 17.3 percent slower in VR than in AR ( Z_${p}<0.04$_Z ). Mouse and 3D input device produced similar task completion times in each condition (AR or VR) respectively. We further found no differences in reported comfort.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual Reality (VR) Head-Mounted Displays (HMDs) are on the verge of becoming commodity hardware available to the average user and feasible to use as a tool for 3D work. 
Some HMDs include front-facing cameras, enabling Augmented Reality (AR) functionality. Apart from avoiding collisions with the environment, interaction with virtual objects may also be affected by seeing the real environment. However, whether these effects are positive or negative has not yet been studied extensively. For most tasks it is unknown whether AR has any advantage over VR. In this work we present the results of a user study in which we compared user performance measured in task completion time on a 9 degrees of freedom object selection and transformation task performed either in AR or VR, both with a 3D input device and a mouse. Our results show faster task completion time in AR over VR. When using a 3D input device, a purely VR environment increased task completion time by 22.5 percent on average compared to AR ( ${p}<0.024$ ). Surprisingly, a similar effect occurred when using a mouse: users were about 17.3 percent slower in VR than in AR ( ${p}<0.04$ ). Mouse and 3D input device produced similar task completion times in each condition (AR or VR) respectively. We further found no differences in reported comfort.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual Reality (VR) Head-Mounted Displays (HMDs) are on the verge of becoming commodity hardware available to the average user and feasible to use as a tool for 3D work. Some HMDs include front-facing cameras, enabling Augmented Reality (AR) functionality. Apart from avoiding collisions with the environment, interaction with virtual objects may also be affected by seeing the real environment. However, whether these effects are positive or negative has not yet been studied extensively. For most tasks it is unknown whether AR has any advantage over VR. 
In this work we present the results of a user study in which we compared user performance measured in task completion time on a 9 degrees of freedom object selection and transformation task performed either in AR or VR, both with a 3D input device and a mouse. Our results show faster task completion time in AR over VR. When using a 3D input device, a purely VR environment increased task completion time by 22.5 percent on average compared to AR ( - ). Surprisingly, a similar effect occurred when using a mouse: users were about 17.3 percent slower in VR than in AR ( - ). Mouse and 3D input device produced similar task completion times in each condition (AR or VR) respectively. We further found no differences in reported comfort.", "title": "Augmented Reality versus Virtual Reality for 3D Object Manipulation", "normalizedTitle": "Augmented Reality versus Virtual Reality for 3D Object Manipulation", "fno": "07833028", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Three Dimensional Displays", "Performance Evaluation", "Mice", "Resists", "Visualization", "Training", "Augmented Reality", "Artificial", "Augmented", "And Virtual Realities Multimedia Information Systems Information Interfaces And Representation", "Interaction Techniques Methodology And Techniques Computer Graphics" ], "authors": [ { "givenName": "Max", "surname": "Krichenbauer", "fullName": "Max Krichenbauer", "affiliation": "Department of Information Science, Nara Sentan Kagaku Gijutsu Daigakuin Daigaku, Ikoma, Nara, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Goshiro", "surname": "Yamamoto", "fullName": "Goshiro Yamamoto", "affiliation": "Department of Interactive Media Design Laboratory, Nara Institute of Science and Technology, Ikoma, Nara, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Takafumi", "surname": "Taketom", "fullName": "Takafumi Taketom", "affiliation": "Department of Interactive Media Design Laboratory, Nara Institute of Science and Technology, Ikoma, Nara, 
Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Christian", "surname": "Sandor", "fullName": "Christian Sandor", "affiliation": "Department of Interactive Media Design Laboratory, Nara Institute of Science and Technology, Ikoma, Nara, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Hirokazu", "surname": "Kato", "fullName": "Hirokazu Kato", "affiliation": "Graduate School of Information Science, NAIST, Ikoma, Nara, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1038-1048", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/1999/0210/0/02100032", "title": "Virtual Reality and Augmented Reality as a Training Tool for Assembly Tasks", "doi": null, "abstractUrl": "/proceedings-article/iv/1999/02100032/12OmNAObbyR", "parentPublication": { "id": "proceedings/iv/1999/0210/0", "title": "1999 IEEE International Conference on Information Visualization (Cat. No. 
PR00210)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmu/2017/31/0/08330112", "title": "Clash tanks: An investigation of virtual and augmented reality gaming experience", "doi": null, "abstractUrl": "/proceedings-article/icmu/2017/08330112/12OmNB8TU7d", "parentPublication": { "id": "proceedings/icmu/2017/31/0", "title": "2017 Tenth International Conference on Mobile Computing and Ubiquitous Network (ICMU)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836495", "title": "Evaluating Positional Head-Tracking in Immersive VR for 3D Designers", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836495/12OmNx4Q6Cm", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504692", "title": "Evaluating wide-field-of-view augmented reality with mixed reality simulation", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504692/12OmNzRZpYz", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08447561", "title": "Transferability of Spatial Maps: Augmented Versus Virtual Reality Training", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08447561/13bd1fKQxs5", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/07/07935524", "title": "Handheld Guides in Inspection Tasks: Augmented Reality versus Picture", "doi": null, "abstractUrl": 
"/journal/tg/2018/07/07935524/13rRUwIF6lc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007246", "title": "AR Feels &#x201c;Softer&#x201d; than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007246/13rRUwh80Hj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sp/2022/1316/0/131600b552", "title": "SoK: Authentication in Augmented and Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/sp/2022/131600b552/1FlQIjcP4FW", "parentPublication": { "id": "proceedings/sp/2022/1316/0/", "title": "2022 IEEE Symposium on Security and Privacy (SP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798174", "title": "Comparison in Depth Perception between Virtual Reality and Augmented Reality Systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798174/1cJ11OY78k0", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a203", "title": "Industrial Augmented Reality: 3D-Content Editor for Augmented Reality Maintenance Worker Support System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a203/1pBMigKK7F6", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08241922", "articleId": "13rRUwfZBVr", "__typename": "AdjacentArticleType" }, "next": { "fno": "07831370", "articleId": "13rRUxNEqQ1", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxNEqQ1", "doi": "10.1109/TVCG.2017.2657511", "abstract": "This paper presents a context-aware computer aided inbetweening (CACAI) technique that interpolates planar strokes to generate inbetween frames from a given set of key frames. The inbetweening is context-aware in the sense that not only the stroke’s shape but also the context (i.e., the neighborhood of a stroke) in which a stroke appears are taken into account for the stroke correspondence and interpolation. Given a pair of successive key frames, the CACAI automatically constructs the stroke correspondence between them by exploiting the context coherence between the corresponding strokes. Meanwhile, the construction algorithm is able to incorporate the user’s interaction with ease and allows the user more effective control over the correspondence process than existing stroke matching techniques. With a one-to-one stroke correspondence, the CACAI interpolates the shape and context between the corresponding strokes for the generation of intermediate frames. In the interpolation sequence, both the shape of individual strokes and the spatial layout between them are well retained such that the feature characteristics and visual appearance of the objects in the key frames can be fully preserved even when complex motions are involved in these objects. We have developed a prototype system to demonstrate the ease of use and effectiveness of the CACAI.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a context-aware computer aided inbetweening (CACAI) technique that interpolates planar strokes to generate inbetween frames from a given set of key frames. 
The inbetweening is context-aware in the sense that not only the stroke’s shape but also the context (i.e., the neighborhood of a stroke) in which a stroke appears are taken into account for the stroke correspondence and interpolation. Given a pair of successive key frames, the CACAI automatically constructs the stroke correspondence between them by exploiting the context coherence between the corresponding strokes. Meanwhile, the construction algorithm is able to incorporate the user’s interaction with ease and allows the user more effective control over the correspondence process than existing stroke matching techniques. With a one-to-one stroke correspondence, the CACAI interpolates the shape and context between the corresponding strokes for the generation of intermediate frames. In the interpolation sequence, both the shape of individual strokes and the spatial layout between them are well retained such that the feature characteristics and visual appearance of the objects in the key frames can be fully preserved even when complex motions are involved in these objects. We have developed a prototype system to demonstrate the ease of use and effectiveness of the CACAI.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a context-aware computer aided inbetweening (CACAI) technique that interpolates planar strokes to generate inbetween frames from a given set of key frames. The inbetweening is context-aware in the sense that not only the stroke’s shape but also the context (i.e., the neighborhood of a stroke) in which a stroke appears are taken into account for the stroke correspondence and interpolation. Given a pair of successive key frames, the CACAI automatically constructs the stroke correspondence between them by exploiting the context coherence between the corresponding strokes. 
Meanwhile, the construction algorithm is able to incorporate the user’s interaction with ease and allows the user more effective control over the correspondence process than existing stroke matching techniques. With a one-to-one stroke correspondence, the CACAI interpolates the shape and context between the corresponding strokes for the generation of intermediate frames. In the interpolation sequence, both the shape of individual strokes and the spatial layout between them are well retained such that the feature characteristics and visual appearance of the objects in the key frames can be fully preserved even when complex motions are involved in these objects. We have developed a prototype system to demonstrate the ease of use and effectiveness of the CACAI.", "title": "Context-Aware Computer Aided Inbetweening", "normalizedTitle": "Context-Aware Computer Aided Inbetweening", "fno": "07831370", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Interpolation", "Shape", "Context", "Layout", "Two Dimensional Displays", "Visualization", "Distortion", "Inbetween", "Correspondence Construction", "Stroke Interpolation", "Context Mesh", "CACAI" ], "authors": [ { "givenName": "Wenwu", "surname": "Yang", "fullName": "Wenwu Yang", "affiliation": "School of Computer and Information Engineering, Zhejiang Gongshang University, Hangzhou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1049-1062", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iwfhr/2004/2187/0/21870438", "title": "Online Handwriting Recognition for Tamil", "doi": null, "abstractUrl": "/proceedings-article/iwfhr/2004/21870438/12OmNBdru9B", "parentPublication": { "id": "proceedings/iwfhr/2004/2187/0", "title": "Proceedings. 
Ninth International Workshop on Frontiers in Handwriting Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2015/8020/0/07450432", "title": "Grouping of Multiple Overtraced Strokes in Interactive Freehand Sketches", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2015/07450432/12OmNqzu6Rn", "parentPublication": { "id": "proceedings/cad-graphics/2015/8020/0", "title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfhr/2016/0981/0/0981a187", "title": "Online Handwritten Mathematical Expressions Recognition by Merging Multiple 1D Interpretations", "doi": null, "abstractUrl": "/proceedings-article/icfhr/2016/0981a187/12OmNwoghas", "parentPublication": { "id": "proceedings/icfhr/2016/0981/0", "title": "2016 15th International Conference on Frontiers in Handwriting Recognition (ICFHR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfhr/2010/4221/0/4221a682", "title": "Context Aware On-line Diagramming Recognition", "doi": null, "abstractUrl": "/proceedings-article/icfhr/2010/4221a682/12OmNwtn3EJ", "parentPublication": { "id": "proceedings/icfhr/2010/4221/0", "title": "Frontiers in Handwriting Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1996/7282/3/00546945", "title": "A feature-preserved thinning algorithm for handwritten Chinese characters", "doi": null, "abstractUrl": "/proceedings-article/icpr/1996/00546945/12OmNykTNkU", "parentPublication": { "id": "proceedings/icpr/1996/7282/3", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmip/2017/5954/0/5954a068", 
"title": "Stroke Extraction of Handwritten Chinese Character Based on Ambiguous Zone Information", "doi": null, "abstractUrl": "/proceedings-article/icmip/2017/5954a068/12OmNzmLxQL", "parentPublication": { "id": "proceedings/icmip/2017/5954/0", "title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2013/4999/0/06628800", "title": "Segmenting Handwritten Math Symbols Using AdaBoost and Multi-scale Shape Context Features", "doi": null, "abstractUrl": "/proceedings-article/icdar/2013/06628800/12OmNzuZUDE", "parentPublication": { "id": "proceedings/icdar/2013/4999/0", "title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/07/07452668", "title": "DrawFromDrawings: 2D Drawing Assistance via Stroke Interpolation with a Sketch Database", "doi": null, "abstractUrl": "/journal/tg/2017/07/07452668/13rRUwbJD4Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/07/07930445", "title": "Globally Consistent Wrinkle-Aware Shading of Line Drawings", "doi": null, "abstractUrl": "/journal/tg/2018/07/07930445/13rRUwbs2gy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfhr/2018/5875/0/587500a285", "title": "Multi-Perspective Multi-Modal Trajectory Descriptions for Handwritten Strokes", "doi": null, "abstractUrl": "/proceedings-article/icfhr/2018/587500a285/17D45Wuc388", "parentPublication": { "id": "proceedings/icfhr/2018/5875/0", "title": "2018 16th International Conference on Frontiers in 
Handwriting Recognition (ICFHR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07833028", "articleId": "13rRUwInvsX", "__typename": "AdjacentArticleType" }, "next": { "fno": "07817889", "articleId": "13rRUynHujf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUynHujf", "doi": "10.1109/TVCG.2017.2653117", "abstract": "The increasing availability of head-mounted displays (HMDs) for home use motivates the study of the possible effects that adopting this new hardware might have on users. Moreover, while the impact of display type has been studied for different kinds of tasks, it has been scarcely explored in procedural training. Our study considered three different types of displays used by participants for training in aviation safety procedures with a serious game. The three displays were respectively representative of: (i) desktop VR (a standard desktop monitor), (ii) many setups for immersive VR used in the literature (an HMD with narrow field of view and a 3-DOF tracker), and (iii) new setups for immersive home VR (an HMD with wide field of view and 6-DOF tracker). We assessed effects on knowledge gain, and different self-reported measures (self-efficacy, engagement, presence). Unlike previous studies of display type that measured effects only immediately after the VR experience, we considered also a longer time span (2 weeks). Results indicated that the display type played a significant role in engagement and presence. The training benefits (increased knowledge and self-efficacy) were instead obtained, and maintained at two weeks, regardless of the display used. The paper discusses the implications of these results.", "abstracts": [ { "abstractType": "Regular", "content": "The increasing availability of head-mounted displays (HMDs) for home use motivates the study of the possible effects that adopting this new hardware might have on users. 
Moreover, while the impact of display type has been studied for different kinds of tasks, it has been scarcely explored in procedural training. Our study considered three different types of displays used by participants for training in aviation safety procedures with a serious game. The three displays were respectively representative of: (i) desktop VR (a standard desktop monitor), (ii) many setups for immersive VR used in the literature (an HMD with narrow field of view and a 3-DOF tracker), and (iii) new setups for immersive home VR (an HMD with wide field of view and 6-DOF tracker). We assessed effects on knowledge gain, and different self-reported measures (self-efficacy, engagement, presence). Unlike previous studies of display type that measured effects only immediately after the VR experience, we considered also a longer time span (2 weeks). Results indicated that the display type played a significant role in engagement and presence. The training benefits (increased knowledge and self-efficacy) were instead obtained, and maintained at two weeks, regardless of the display used. The paper discusses the implications of these results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The increasing availability of head-mounted displays (HMDs) for home use motivates the study of the possible effects that adopting this new hardware might have on users. Moreover, while the impact of display type has been studied for different kinds of tasks, it has been scarcely explored in procedural training. Our study considered three different types of displays used by participants for training in aviation safety procedures with a serious game. The three displays were respectively representative of: (i) desktop VR (a standard desktop monitor), (ii) many setups for immersive VR used in the literature (an HMD with narrow field of view and a 3-DOF tracker), and (iii) new setups for immersive home VR (an HMD with wide field of view and 6-DOF tracker). 
We assessed effects on knowledge gain, and different self-reported measures (self-efficacy, engagement, presence). Unlike previous studies of display type that measured effects only immediately after the VR experience, we considered also a longer time span (2 weeks). Results indicated that the display type played a significant role in engagement and presence. The training benefits (increased knowledge and self-efficacy) were instead obtained, and maintained at two weeks, regardless of the display used. The paper discusses the implications of these results.", "title": "Effects of Different Types of Virtual Reality Display on Presence and Learning in a Safety Training Scenario", "normalizedTitle": "Effects of Different Types of Virtual Reality Display on Presence and Learning in a Safety Training Scenario", "fno": "07817889", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Training", "Safety", "Games", "Virtual Reality", "Displays", "Fidelity", "Training", "User Study", "Aviation", "Safety" ], "authors": [ { "givenName": "Fabio", "surname": "Buttussi", "fullName": "Fabio Buttussi", "affiliation": "Human-Computer Interaction Lab, Department of Mathematics, Computer Science, and Physics, University of Udine, Udine, Italy", "__typename": "ArticleAuthorType" }, { "givenName": "Luca", "surname": "Chittaro", "fullName": "Luca Chittaro", "affiliation": "Human-Computer Interaction Lab, Department of Mathematics, Computer Science, and Physics, University of Udine, Udine, Italy", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1063-1076", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892320", "title": "A mixed reality tele-presence platform to exchange emotion and sensory 
information based on MPEG-V standard", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892320/12OmNxUdv7D", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08445841", "title": "Demonstration of Olfactory Display Based on Sniffing Action", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08445841/13bd1eTtWYE", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446524", "title": "HangerOVER: Development of HMO-Embedded Haptic Display Using the Hanger Reflex and VR Application", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446524/13bd1fdV4l2", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446551", "title": "A Demonstration of ShareVR: Co-Located Experiences for Virtual Reality Between HMD and Non-HMD Users", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446551/13bd1gzWkQD", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798147", "title": "[DC] Designing VR for Teamwork: The Influence of HMD VR Communication Capabilities on Teamwork Competencies", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798147/1cJ0HhK5ANW", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D 
User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090440", "title": "The Effects of Multi-sensory Aerial Firefighting Training in Virtual Reality on Situational Awareness, Workload, and Presence", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090440/1jIxmMZKrSw", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090580", "title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199565", "title": "Invisible Boundaries for VR: Auditory and Haptic Signals as Indicators for Real World Boundaries", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199565/1ncgw44iPJu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a528", "title": "VXSlate: Combining Head Movement and Mobile Touch for Large Virtual Display Interaction", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a528/1tnXg447e7e", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vrw/2021/4057/0/405700a109", "title": "Generative RGB-D Face Completion for Head-Mounted Display Removal", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a109/1tnXncnHsIg", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07831370", "articleId": "13rRUxNEqQ1", "__typename": "AdjacentArticleType" }, "next": { "fno": "07792176", "articleId": "13rRUwghd56", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwghd56", "doi": "10.1109/TVCG.2016.2642109", "abstract": "Cartograms are maps in which areas of geographic regions, such as countries and states, appear in proportion to some variable of interest, such as population or income. Cartograms are popular visualizations for geo-referenced data that have been used for over a century to illustrate patterns and trends in the world around us. Despite the popularity of cartograms, and the large number of cartogram types, there are few studies evaluating the effectiveness of cartograms in conveying information. Based on a recent task taxonomy for cartograms, we evaluate four major types of cartograms: contiguous, non-contiguous, rectangular, and Dorling cartograms. We first evaluate the effectiveness of these cartogram types by quantitative performance analysis (time and error). Second, we collect qualitative data with an attitude study and by analyzing subjective preferences. Third, we compare the quantitative and qualitative results with the results of a metrics-based cartogram evaluation. Fourth, we analyze the results of our study in the context of cartography, geography, visual perception, and demography. Finally, we consider implications for design and possible improvements.", "abstracts": [ { "abstractType": "Regular", "content": "Cartograms are maps in which areas of geographic regions, such as countries and states, appear in proportion to some variable of interest, such as population or income. Cartograms are popular visualizations for geo-referenced data that have been used for over a century to illustrate patterns and trends in the world around us. 
Despite the popularity of cartograms, and the large number of cartogram types, there are few studies evaluating the effectiveness of cartograms in conveying information. Based on a recent task taxonomy for cartograms, we evaluate four major types of cartograms: contiguous, non-contiguous, rectangular, and Dorling cartograms. We first evaluate the effectiveness of these cartogram types by quantitative performance analysis (time and error). Second, we collect qualitative data with an attitude study and by analyzing subjective preferences. Third, we compare the quantitative and qualitative results with the results of a metrics-based cartogram evaluation. Fourth, we analyze the results of our study in the context of cartography, geography, visual perception, and demography. Finally, we consider implications for design and possible improvements.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Cartograms are maps in which areas of geographic regions, such as countries and states, appear in proportion to some variable of interest, such as population or income. Cartograms are popular visualizations for geo-referenced data that have been used for over a century to illustrate patterns and trends in the world around us. Despite the popularity of cartograms, and the large number of cartogram types, there are few studies evaluating the effectiveness of cartograms in conveying information. Based on a recent task taxonomy for cartograms, we evaluate four major types of cartograms: contiguous, non-contiguous, rectangular, and Dorling cartograms. We first evaluate the effectiveness of these cartogram types by quantitative performance analysis (time and error). Second, we collect qualitative data with an attitude study and by analyzing subjective preferences. Third, we compare the quantitative and qualitative results with the results of a metrics-based cartogram evaluation. 
Fourth, we analyze the results of our study in the context of cartography, geography, visual perception, and demography. Finally, we consider implications for design and possible improvements.", "title": "Evaluating Cartogram Effectiveness", "normalizedTitle": "Evaluating Cartogram Effectiveness", "fno": "07792176", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Shape", "Sociology", "Statistics", "Voting", "Topology", "Data Visualization", "Geography", "Cartograms", "Geo Visualization", "Subjective Evaluation" ], "authors": [ { "givenName": "Sabrina", "surname": "Nusrat", "fullName": "Sabrina Nusrat", "affiliation": "University of Arizona, Tucson, AZ", "__typename": "ArticleAuthorType" }, { "givenName": "Md. Jawaherul", "surname": "Alam", "fullName": "Md. Jawaherul Alam", "affiliation": "University of California, Irvine, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Stephen", "surname": "Kobourov", "fullName": "Stephen Kobourov", "affiliation": "University of Arizona, Tucson, AZ", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1077-1090", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/1998/9176/0/91760197", "title": "Continuous Cartogram Construction", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1998/91760197/12OmNA14A9k", "parentPublication": { "id": "proceedings/ieee-vis/1998/9176/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visap/2017/3490/0/08282365", "title": "Adapted dorling cartogram on wage inequality in Portugal", "doi": null, "abstractUrl": "/proceedings-article/visap/2017/08282365/12OmNBcj5CC", "parentPublication": { "id": 
"proceedings/visap/2017/3490/0", "title": "2017 IEEE VIS Arts Program (VISAP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2015/1934/1/1934a020", "title": "Developing and Evaluating Software Engineering Process Theories", "doi": null, "abstractUrl": "/proceedings-article/icse/2015/1934a020/12OmNwF0C6J", "parentPublication": { "id": "proceedings/icse/2015/1934/2", "title": "2015 IEEE/ACM 37th IEEE International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/09/07328332", "title": "Visual Encoding of Dissimilarity Data via Topology-Preserving Map Deformation", "doi": null, "abstractUrl": "/journal/tg/2016/09/07328332/13rRUwbs2b6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v0757", "title": "Worldmapper: The World as You've Never Seen it Before", "doi": null, "abstractUrl": "/journal/tg/2006/05/v0757/13rRUwgQpqE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/10/08078198", "title": "Cartogram Visualization for Bivariate Geo-Statistical Data", "doi": null, "abstractUrl": "/journal/tg/2018/10/08078198/13rRUx0xPZE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom-bigdatase/2018/4388/0/438801a938", "title": "Privacy Parameter Variation Using RAPPOR on a Malware Dataset", "doi": null, "abstractUrl": "/proceedings-article/trustcom-bigdatase/2018/438801a938/17D45VtKixl", "parentPublication": { "id": 
"proceedings/trustcom-bigdatase/2018/4388/0", "title": "2018 17th IEEE International Conference On Trust, Security And Privacy In Computing And Communications/ 12th IEEE International Conference On Big Data Science And Engineering (TrustCom/BigDataSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2019/6783/0/08665581", "title": "Examining Sentiment Analysis When Evaluating Survey Responses", "doi": null, "abstractUrl": "/proceedings-article/icsc/2019/08665581/18qcfSspr6E", "parentPublication": { "id": "proceedings/icsc/2019/6783/0", "title": "2019 IEEE 13th International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/t4e/2019/4227/0/422700a102", "title": "Evaluating Effectiveness of a Teacher Training MOOC: Industry Perspective", "doi": null, "abstractUrl": "/proceedings-article/t4e/2019/422700a102/1hgtHpFQT8A", "parentPublication": { "id": "proceedings/t4e/2019/4227/0", "title": "2019 IEEE Tenth International Conference on Technology for Education (T4E)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/03/09275378", "title": "Task-Based Effectiveness of Interactive Contiguous Area Cartograms", "doi": null, "abstractUrl": "/journal/tg/2021/03/09275378/1pcOsFJxDYQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07817889", "articleId": "13rRUynHujf", "__typename": "AdjacentArticleType" }, "next": { "fno": "07831400", "articleId": "13rRUyYjK5m", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyYjK5m", "doi": "10.1109/TVCG.2017.2657634", "abstract": "We propose a visual marker embedding method for the pose estimation of a projection surface to correctly map projected images onto the surface. Assuming that the surface is fabricated by a full-color or multi-material three-dimensional (3D) printer, we propose to automatically embed visual markers on the surface with mechanical accuracy. The appearance of the marker is designed such that the marker is detected by infrared cameras even when printed on a non-planar surface while its appearance can be diminished by the projection to be as imperceptible as possible to human observers. The marker placement is optimized using a genetic algorithm to maximize the number of valid viewpoints from which the pose of the object can be estimated correctly using a stereo camera system. We also propose a radiometric compensation technique to quickly diminish the marker appearance. Experimental results confirm that the pose of projection objects are correctly estimated while the appearance of the markers was diminished to an imperceptible level. At the same time, we confirmed the limitations of the current method; only one object can be handled, and pose estimation is not performed at interactive frame rates. Finally, we demonstrate the proposed technique to show that it works successfully for various surface shapes and target textures.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a visual marker embedding method for the pose estimation of a projection surface to correctly map projected images onto the surface. 
Assuming that the surface is fabricated by a full-color or multi-material three-dimensional (3D) printer, we propose to automatically embed visual markers on the surface with mechanical accuracy. The appearance of the marker is designed such that the marker is detected by infrared cameras even when printed on a non-planar surface while its appearance can be diminished by the projection to be as imperceptible as possible to human observers. The marker placement is optimized using a genetic algorithm to maximize the number of valid viewpoints from which the pose of the object can be estimated correctly using a stereo camera system. We also propose a radiometric compensation technique to quickly diminish the marker appearance. Experimental results confirm that the pose of projection objects are correctly estimated while the appearance of the markers was diminished to an imperceptible level. At the same time, we confirmed the limitations of the current method; only one object can be handled, and pose estimation is not performed at interactive frame rates. Finally, we demonstrate the proposed technique to show that it works successfully for various surface shapes and target textures.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a visual marker embedding method for the pose estimation of a projection surface to correctly map projected images onto the surface. Assuming that the surface is fabricated by a full-color or multi-material three-dimensional (3D) printer, we propose to automatically embed visual markers on the surface with mechanical accuracy. The appearance of the marker is designed such that the marker is detected by infrared cameras even when printed on a non-planar surface while its appearance can be diminished by the projection to be as imperceptible as possible to human observers. 
The marker placement is optimized using a genetic algorithm to maximize the number of valid viewpoints from which the pose of the object can be estimated correctly using a stereo camera system. We also propose a radiometric compensation technique to quickly diminish the marker appearance. Experimental results confirm that the pose of projection objects are correctly estimated while the appearance of the markers was diminished to an imperceptible level. At the same time, we confirmed the limitations of the current method; only one object can be handled, and pose estimation is not performed at interactive frame rates. Finally, we demonstrate the proposed technique to show that it works successfully for various surface shapes and target textures.", "title": "Fabricating Diminishable Visual Markers for Geometric Registration in Projection Mapping", "normalizedTitle": "Fabricating Diminishable Visual Markers for Geometric Registration in Projection Mapping", "fno": "07831400", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Three Dimensional Displays", "Cameras", "Printers", "Visualization", "Shape", "Surface Treatment", "Fabrication", "Digital Fabrication", "Spatial Augmented Reality", "Projection Mapping", "Diminished Reality", "Marker Based Tracking" ], "authors": [ { "givenName": "Hirotaka", "surname": "Asayama", "fullName": "Hirotaka Asayama", "affiliation": "Graduate School of Engineering Science, Osaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Daisuke", "surname": "Iwai", "fullName": "Daisuke Iwai", "affiliation": "Graduate School of Engineering Science, Osaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Kosuke", "surname": "Sato", "fullName": "Kosuke Sato", "affiliation": "Graduate School of Engineering Science, Osaka, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": 
"trans", "pages": "1091-1102", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2013/2840/0/2840c632", "title": "Geometric Registration Based on Distortion Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840c632/12OmNButq7h", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2017/2937/0/2937a114", "title": "Detecting Good Surface for Improvisatory Visual Projection", "doi": null, "abstractUrl": "/proceedings-article/ism/2017/2937a114/12OmNCd2roE", "parentPublication": { "id": "proceedings/ism/2017/2937/0", "title": "2017 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2014/4284/0/4284a173", "title": "Texture Mapping Based on Projection and Viewpoints", "doi": null, "abstractUrl": "/proceedings-article/icdh/2014/4284a173/12OmNvjgWVu", "parentPublication": { "id": "proceedings/icdh/2014/4284/0", "title": "2014 5th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2015/8020/0/07450396", "title": "Parameter Estimation of Point Projection on NURBS Curves and Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2015/07450396/12OmNwwd2UN", "parentPublication": { "id": "proceedings/cad-graphics/2015/8020/0", "title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a487", "title": "The Geometry of Colorful, Lenticular Fiducial Markers", 
"doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a487/12OmNz61dwY", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/04/ttg201404540", "title": "Geometrically-Correct Projection-Based Texture Mapping onto a Deformable Object", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg201404540/13rRUxcsYLO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007213", "title": "Geometric and Photometric Consistency in a Mixed Video and Galvanoscopic Scanning Laser Projection Mapping System", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007213/13rRUxcsYLX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998378", "title": "FibAR: Embedding Optical Fibers in 3D Printed Objects for Active Markers in Dynamic Projection Mapping", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998378/1hpPCL9mirK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102813", "title": "Projection Mapping System To A Widely Dynamic Sphere With Circumferential Markers", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102813/1kwqWza3GI8", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09536434", "title": 
"Dynamic Projection Mapping for Robust Sphere Posture Tracking Using Uniform/Biased Circumferential Markers", "doi": null, "abstractUrl": "/journal/tg/2022/12/09536434/1wREa2FncUE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07792176", "articleId": "13rRUwghd56", "__typename": "AdjacentArticleType" }, "next": { "fno": "07835631", "articleId": "13rRUx0xPIO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesSB", "name": "ttg201802-07831400s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201802-07831400s1.zip", "extension": "zip", "size": "19.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0xPIO", "doi": "10.1109/TVCG.2017.2660488", "abstract": "Escher transmutation is a graphic art that smoothly transforms one tile pattern into another tile pattern with dual perception. A classic example is the artwork called Sky and Water, in which a compelling figure-ground arrangement is applied to portray the transmutation of a bird in sky and a fish in water. The shape of a bird is progressively deformed and dissolves into the background while the background gradually reveals the shape of a fish. This paper introduces a system to create a variety of Escher-like transmutations, which includes the algorithms for initializing a tile pattern with dual figure-ground arrangement, for searching for the best matched shape of a user-specified motif from a database, and for transforming the content and shapes of tile patterns using a content-aware warping technique. The proposed system, integrating the graphic techniques of tile initialization, shape matching, and shape warping, allows users to create various Escher-like transmutations with minimal user interaction. Experimental results and conducted user studies demonstrate the feasibility and flexibility of the proposed system in Escher art generation.", "abstracts": [ { "abstractType": "Regular", "content": "Escher transmutation is a graphic art that smoothly transforms one tile pattern into another tile pattern with dual perception. A classic example is the artwork called Sky and Water, in which a compelling figure-ground arrangement is applied to portray the transmutation of a bird in sky and a fish in water. 
The shape of a bird is progressively deformed and dissolves into the background while the background gradually reveals the shape of a fish. This paper introduces a system to create a variety of Escher-like transmutations, which includes the algorithms for initializing a tile pattern with dual figure-ground arrangement, for searching for the best matched shape of a user-specified motif from a database, and for transforming the content and shapes of tile patterns using a content-aware warping technique. The proposed system, integrating the graphic techniques of tile initialization, shape matching, and shape warping, allows users to create various Escher-like transmutations with minimal user interaction. Experimental results and conducted user studies demonstrate the feasibility and flexibility of the proposed system in Escher art generation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Escher transmutation is a graphic art that smoothly transforms one tile pattern into another tile pattern with dual perception. A classic example is the artwork called Sky and Water, in which a compelling figure-ground arrangement is applied to portray the transmutation of a bird in sky and a fish in water. The shape of a bird is progressively deformed and dissolves into the background while the background gradually reveals the shape of a fish. This paper introduces a system to create a variety of Escher-like transmutations, which includes the algorithms for initializing a tile pattern with dual figure-ground arrangement, for searching for the best matched shape of a user-specified motif from a database, and for transforming the content and shapes of tile patterns using a content-aware warping technique. The proposed system, integrating the graphic techniques of tile initialization, shape matching, and shape warping, allows users to create various Escher-like transmutations with minimal user interaction. 
Experimental results and conducted user studies demonstrate the feasibility and flexibility of the proposed system in Escher art generation.", "title": "Generation of Escher Arts with Dual Perception", "normalizedTitle": "Generation of Escher Arts with Dual Perception", "fno": "07835631", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Shape", "Art", "Databases", "Interpolation", "Electronic Mail", "Transforms", "Escher Art", "Shape Matching", "Content Aware Warping" ], "authors": [ { "givenName": "Shih-Syun", "surname": "Lin", "fullName": "Shih-Syun Lin", "affiliation": "Department of Computer Science and Engineering, National Taiwan Ocean University, Keelung, Taiwan, R.O.C.", "__typename": "ArticleAuthorType" }, { "givenName": "Charles C.", "surname": "Morace", "fullName": "Charles C. Morace", "affiliation": "Department of Computer Science and Information Engineering, National Cheng Kung University, Tainan, Taiwan, R.O.C", "__typename": "ArticleAuthorType" }, { "givenName": "Chao-Hung", "surname": "Lin", "fullName": "Chao-Hung Lin", "affiliation": "Department of Geomatics, National Cheng Kung University, Tainan, Taiwan, R.O.C.", "__typename": "ArticleAuthorType" }, { "givenName": "Li-Fong", "surname": "Hsu", "fullName": "Li-Fong Hsu", "affiliation": "Department of Computer Science and Information Engineering, National Cheng Kung University, Tainan, Taiwan, R.O.C", "__typename": "ArticleAuthorType" }, { "givenName": "Tong-Yee", "surname": "Lee", "fullName": "Tong-Yee Lee", "affiliation": "Department of Computer Science and Information Engineering, National Cheng Kung University, Tainan, Taiwan, R.O.C", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1103-1113", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"recommendedArticles": [ { "id": "proceedings/iv/2017/0831/0/0831a405", "title": "GC1 Cubic Trigonometric Spline Function with its Geometric Attributes", "doi": null, "abstractUrl": "/proceedings-article/iv/2017/0831a405/12OmNAlvHMJ", "parentPublication": { "id": "proceedings/iv/2017/0831/0", "title": "2017 21st International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2017/0852/0/0852a008", "title": "A Rational Quadratic Trigonometric Spline with Interval Shape Control", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2017/0852a008/12OmNqGA5eh", "parentPublication": { "id": "proceedings/cgiv/2017/0852/0", "title": "2017 14th International Conference on Computer Graphics, Imaging and Visualization (CGiV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nicoint/2017/5332/0/5332a095", "title": "Evaluation of Ikebana Based on Geometric Shape Arrangement", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2017/5332a095/12OmNsbY6TS", "parentPublication": { "id": "proceedings/nicoint/2017/5332/0", "title": "2017 Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isise/2008/3494/1/3494a340", "title": "Computing the Diameters of Abelian Cayley Digraphs with Degree 2", "doi": null, "abstractUrl": "/proceedings-article/isise/2008/3494a340/12OmNvJXeB9", "parentPublication": { "id": "proceedings/isise/2008/3494/1", "title": "2008 International Symposium on Information Science and Engieering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460466", "title": "Ensemble symbol recognition with Hough forest", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460466/12OmNwx3Qcd", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", 
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a190", "title": "[POSTER] Manipulating Haptic Shape Perception by Visual Surface Deformation and Finger Displacement in Spatial Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a190/12OmNznkK1w", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2003/03/l0307", "title": "On the Parallel Execution Time of Tiled Loops", "doi": null, "abstractUrl": "/journal/td/2003/03/l0307/13rRUxBJhuR", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/03/ttg2014030436", "title": "Optimized Synthesis of Art Patterns and Layered Textures", "doi": null, "abstractUrl": "/journal/tg/2014/03/ttg2014030436/13rRUyfKIHO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09151074", "title": "Generalized Autoencoder for Volumetric Shape Generation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09151074/1lPHe9C5kiY", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candarw/2020/9919/0/991900a151", "title": "Art Font Image Generation with Conditional Generative Adversarial Networks", "doi": null, "abstractUrl": 
"/proceedings-article/candarw/2020/991900a151/1rqEAweDigM", "parentPublication": { "id": "proceedings/candarw/2020/9919/0", "title": "2020 Eighth International Symposium on Computing and Networking Workshops (CANDARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07831400", "articleId": "13rRUyYjK5m", "__typename": "AdjacentArticleType" }, "next": { "fno": "07833186", "articleId": "13rRUIM2VBM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUIM2VBM", "doi": "10.1109/TVCG.2017.2657751", "abstract": "Stylizing a 3D model with characteristic shapes or appearances is common in product design, particularly in the design of 3D model merchandise, such as souvenirs, toys, furniture, and stylized items. A model stylization approach is proposed in this study. The approach combines base and style models while preserving user-specified shape features of the base model and the attractive features of the style model with limited assistance from a user. The two models are first combined at the topological level. A tree-growing technique is utilized to search for all possible combinations of the two models. Second, the models are combined at textural and geometric levels by employing a morphing technique. Results show that the proposed approach generates various appealing models and allows users to control the diversity of the output models and adjust the blending degree between the base and style models. The results of this work are also experimentally compared with those of a recent work through a user study. The comparison indicates that our results are more appealing, feature-preserving, and reasonable than those of the compared previous study. The proposed system allows product designers to easily explore design possibilities and assists novice users in creating their own stylized models.", "abstracts": [ { "abstractType": "Regular", "content": "Stylizing a 3D model with characteristic shapes or appearances is common in product design, particularly in the design of 3D model merchandise, such as souvenirs, toys, furniture, and stylized items. A model stylization approach is proposed in this study. 
The approach combines base and style models while preserving user-specified shape features of the base model and the attractive features of the style model with limited assistance from a user. The two models are first combined at the topological level. A tree-growing technique is utilized to search for all possible combinations of the two models. Second, the models are combined at textural and geometric levels by employing a morphing technique. Results show that the proposed approach generates various appealing models and allows users to control the diversity of the output models and adjust the blending degree between the base and style models. The results of this work are also experimentally compared with those of a recent work through a user study. The comparison indicates that our results are more appealing, feature-preserving, and reasonable than those of the compared previous study. The proposed system allows product designers to easily explore design possibilities and assists novice users in creating their own stylized models.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Stylizing a 3D model with characteristic shapes or appearances is common in product design, particularly in the design of 3D model merchandise, such as souvenirs, toys, furniture, and stylized items. A model stylization approach is proposed in this study. The approach combines base and style models while preserving user-specified shape features of the base model and the attractive features of the style model with limited assistance from a user. The two models are first combined at the topological level. A tree-growing technique is utilized to search for all possible combinations of the two models. Second, the models are combined at textural and geometric levels by employing a morphing technique. 
Results show that the proposed approach generates various appealing models and allows users to control the diversity of the output models and adjust the blending degree between the base and style models. The results of this work are also experimentally compared with those of a recent work through a user study. The comparison indicates that our results are more appealing, feature-preserving, and reasonable than those of the compared previous study. The proposed system allows product designers to easily explore design possibilities and assists novice users in creating their own stylized models.", "title": "Geometric and Textural Blending for 3D Model Stylization", "normalizedTitle": "Geometric and Textural Blending for 3D Model Stylization", "fno": "07833186", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Solid Modeling", "Computational Modeling", "Three Dimensional Displays", "Shape", "Feature Extraction", "Topology", "Geometry", "Computer Graphics", "Modeling" ], "authors": [ { "givenName": "Yi-Jheng", "surname": "Huang", "fullName": "Yi-Jheng Huang", "affiliation": "Department of Computer Science, National Chiao Tung University, Hsinchu, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": "Wen-Chieh", "surname": "Lin", "fullName": "Wen-Chieh Lin", "affiliation": "Department of Computer Science, National Chiao Tung University, Hsinchu, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": "I-Cheng", "surname": "Yeh", "fullName": "I-Cheng Yeh", "affiliation": "Department of Computer Science and Engineering, Yuan Ze University, Taoyuan, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": "Tong-Yee", "surname": "Lee", "fullName": "Tong-Yee Lee", "affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Tainan, Taiwan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": 
"2018-02-01 00:00:00", "pubType": "trans", "pages": "1114-1126", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200m2436", "title": "3DStyleNet: Creating 3D Shapes with Geometric and Texture Style Variations", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200m2436/1BmFh10W9Bm", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4184", "title": "Neural Strokes: Stylized Line Drawing of 3D Shapes", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4184/1BmI4k03VrG", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600o4320", "title": "Geometric and Textural Augmentation for Domain Gap Reduction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600o4320/1H0KCjHr1NC", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8321", "title": "StylizedNeRF: Consistent 3D Scene Stylization as Stylized NeRF via 2D-3D Mutual Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8321/1H0L3Z762gU", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2022/6946/0/694600n3482", "title": "Text2Mesh: Text-Driven Neural Stylization for Meshes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3482/1H1hBnpgbAI", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600q6252", "title": "3D Photo Stylization: Learning to Generate Stylized Novel Views from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600q6252/1H1n8rSbNW8", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccgiv/2022/9250/0/925000a006", "title": "An Example&#x00AD;Based Method for 3D Real&#x00AD;time Rendering In Chinese Ink Style", "doi": null, "abstractUrl": "/proceedings-article/iccgiv/2022/925000a006/1LxfoK6kZzi", "parentPublication": { "id": "proceedings/iccgiv/2022/9250/0", "title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093489", "title": "Self-Contained Stylization via Steganography for Reverse and Serial Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093489/1jPbwyJ3hnO", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09485060", "title": "Non-Local Representation Based Mutual Affine-Transfer Network for Photorealistic Stylization", "doi": null, "abstractUrl": 
"/journal/tp/2022/10/09485060/1veojT6P4Gs", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09547845", "title": "Exemplar-Based 3D Portrait Stylization", "doi": null, "abstractUrl": "/journal/tg/2023/02/09547845/1x9TLh9tiow", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07835631", "articleId": "13rRUx0xPIO", "__typename": "AdjacentArticleType" }, "next": { "fno": "07829422", "articleId": "13rRUyYSWl6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyYSWl6", "doi": "10.1109/TVCG.2017.2655523", "abstract": "Porous structures such as trabecular bone are widely seen in nature. These structures are lightweight and exhibit strong mechanical properties. In this paper, we present a method to generate bone-like porous structures as lightweight infill for additive manufacturing. Our method builds upon and extends voxel-wise topology optimization. In particular, for the purpose of generating sparse yet stable structures distributed in the interior of a given shape, we propose upper bounds on the localized material volume in the proximity of each voxel in the design domain. We then aggregate the local per-voxel constraints by their p-norm into an equivalent global constraint, in order to facilitate an efficient optimization process. Implemented on a high-resolution topology optimization framework, our results demonstrate mechanically optimized, detailed porous structures which mimic those found in nature. We further show variants of the optimized structures subject to different design specifications, and we analyze the optimality and robustness of the obtained structures.", "abstracts": [ { "abstractType": "Regular", "content": "Porous structures such as trabecular bone are widely seen in nature. These structures are lightweight and exhibit strong mechanical properties. In this paper, we present a method to generate bone-like porous structures as lightweight infill for additive manufacturing. Our method builds upon and extends voxel-wise topology optimization. 
In particular, for the purpose of generating sparse yet stable structures distributed in the interior of a given shape, we propose upper bounds on the localized material volume in the proximity of each voxel in the design domain. We then aggregate the local per-voxel constraints by their p-norm into an equivalent global constraint, in order to facilitate an efficient optimization process. Implemented on a high-resolution topology optimization framework, our results demonstrate mechanically optimized, detailed porous structures which mimic those found in nature. We further show variants of the optimized structures subject to different design specifications, and we analyze the optimality and robustness of the obtained structures.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Porous structures such as trabecular bone are widely seen in nature. These structures are lightweight and exhibit strong mechanical properties. In this paper, we present a method to generate bone-like porous structures as lightweight infill for additive manufacturing. Our method builds upon and extends voxel-wise topology optimization. In particular, for the purpose of generating sparse yet stable structures distributed in the interior of a given shape, we propose upper bounds on the localized material volume in the proximity of each voxel in the design domain. We then aggregate the local per-voxel constraints by their p-norm into an equivalent global constraint, in order to facilitate an efficient optimization process. Implemented on a high-resolution topology optimization framework, our results demonstrate mechanically optimized, detailed porous structures which mimic those found in nature. 
We further show variants of the optimized structures subject to different design specifications, and we analyze the optimality and robustness of the obtained structures.", "title": "Infill Optimization for Additive Manufacturing—Approaching Bone-Like Porous Structures", "normalizedTitle": "Infill Optimization for Additive Manufacturing—Approaching Bone-Like Porous Structures", "fno": "07829422", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Optimisation", "Porous Materials", "Topology", "Trabecular Bone", "Voxel Wise Topology Optimization", "Sparse Yet Stable Structures", "Per Voxel Constraints", "Efficient Optimization Process", "High Resolution Topology Optimization Framework", "Optimized Structures", "Mechanical Properties", "Porous Structures", "Additive Manufacturing Approaching Bone Like Porous Structures", "Optimization", "Bones", "Topology", "Solids", "Three Dimensional Printing", "Shape", "Mechanical Factors", "Infill", "Additive Manufacturing", "Trabecular Bone", "Porous Structures", "Topology Optimization" ], "authors": [ { "givenName": "Jun", "surname": "Wu", "fullName": "Jun Wu", "affiliation": "Department of Mechanical Engineering, Technical University of Denmark, Lyngby, Denmark", "__typename": "ArticleAuthorType" }, { "givenName": "Niels", "surname": "Aage", "fullName": "Niels Aage", "affiliation": "Department of Mechanical Engineering, Technical University of Denmark, Lyngby, Denmark", "__typename": "ArticleAuthorType" }, { "givenName": "Rüdiger", "surname": "Westermann", "fullName": "Rüdiger Westermann", "affiliation": "Department of Computer Science, Technische Universität München, Garching, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Ole", "surname": "Sigmund", "fullName": "Ole Sigmund", "affiliation": "Department of Mechanical Engineering, Technical University of Denmark, Lyngby, Denmark", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": 
false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1127-1140", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/apcase/2015/7588/0/7588a409", "title": "Fuzzy Inference System Applied to Mechanical Design of Bone Tissue Engineering Scaffolds", "doi": null, "abstractUrl": "/proceedings-article/apcase/2015/7588a409/12OmNwErpRw", "parentPublication": { "id": "proceedings/apcase/2015/7588/0", "title": "2015 Asia-Pacific Conference on Computer Aided System Engineering (APCASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/idt/2009/5750/0/05404100", "title": "Stable optical filter using porous silicon technology", "doi": null, "abstractUrl": "/proceedings-article/idt/2009/05404100/12OmNy1SFDZ", "parentPublication": { "id": "proceedings/idt/2009/5750/0", "title": "2009 4th International Design and Test Workshop (IDT 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/01/tth2011010039", "title": "Physics-Based Haptic Simulation of Bone Machining", "doi": null, "abstractUrl": "/journal/th/2011/01/tth2011010039/13rRUwIF6le", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse-euc/2017/3220/1/08005847", "title": "A Volumetric Shape Registration Based on Locally Affine-Invariant Constraint", "doi": null, "abstractUrl": "/proceedings-article/cse-euc/2017/08005847/17D45XoXP6p", "parentPublication": { "id": "proceedings/cse-euc/2017/3220/1", "title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/aiam/2021/1732/0/173200a552", "title": "Preparation of Gradient Porous TiAl Intermetallics with additive manufacturing technology", "doi": null, "abstractUrl": "/proceedings-article/aiam/2021/173200a552/1BzTYFOw7mg", "parentPublication": { "id": "proceedings/aiam/2021/1732/0", "title": "2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2022/8487/0/848700a142", "title": "3D printing of bioceramic/polycaprolactone composite scaffolds for bone tissue engineering", "doi": null, "abstractUrl": "/proceedings-article/bibe/2022/848700a142/1J6hCI5IlWM", "parentPublication": { "id": "proceedings/bibe/2022/8487/0", "title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icedme/2020/8145/0/09122190", "title": "Effect of internal and external porous structure design on stress distribution of implant bridge: A Finite element analysis", "doi": null, "abstractUrl": "/proceedings-article/icedme/2020/09122190/1kRSAvopeJa", "parentPublication": { "id": "proceedings/icedme/2020/8145/0", "title": "2020 3rd International Conference on Electron Device and Mechanical Engineering (ICEDME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/07/09258406", "title": "Efficient Representation and Optimization for TPMS-Based Porous Structures", "doi": null, "abstractUrl": "/journal/tg/2022/07/09258406/1oHi1OQbVOo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2020/9234/0/923400a304", "title": "Design and optimization of bionic bone with micropore structure suitable for 3D 
printing", "doi": null, "abstractUrl": "/proceedings-article/icdh/2020/923400a304/1uGXVgnGyHK", "parentPublication": { "id": "proceedings/icdh/2020/9234/0", "title": "2020 8th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mlise/2021/1736/0/173600a521", "title": "Application of Minimal Curved Surface in the Design and Preparation of Bone Scaffold", "doi": null, "abstractUrl": "/proceedings-article/mlise/2021/173600a521/1yOW1W9VunK", "parentPublication": { "id": "proceedings/mlise/2021/1736/0", "title": "2021 International Conference on Machine Learning and Intelligent Systems Engineering (MLISE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07833186", "articleId": "13rRUIM2VBM", "__typename": "AdjacentArticleType" }, "next": { "fno": "07817898", "articleId": "13rRUzphDy2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUzphDy2", "doi": "10.1109/TVCG.2017.2653106", "abstract": "Line graphs are usually considered to be the best choice for visualizing time series data, whereas sometimes also scatter plots are used for showing main trends. So far there are no guidelines that indicate which of these visualization methods better display trends in time series for a given canvas. Assuming that the main information in a time series is its overall trend, we propose an algorithm that automatically picks the visualization method that reveals this trend best. This is achieved by measuring the visual consistency between the trend curve represented by a LOESS fit and the trend described by a scatter plot or a line graph. To measure the consistency between our algorithm and user choices, we performed an empirical study with a series of controlled experiments that show a large correspondence. In a factor analysis we furthermore demonstrate that various visual and data factors have effects on the preference for a certain type of visualization.", "abstracts": [ { "abstractType": "Regular", "content": "Line graphs are usually considered to be the best choice for visualizing time series data, whereas sometimes also scatter plots are used for showing main trends. So far there are no guidelines that indicate which of these visualization methods better display trends in time series for a given canvas. Assuming that the main information in a time series is its overall trend, we propose an algorithm that automatically picks the visualization method that reveals this trend best. 
This is achieved by measuring the visual consistency between the trend curve represented by a LOESS fit and the trend described by a scatter plot or a line graph. To measure the consistency between our algorithm and user choices, we performed an empirical study with a series of controlled experiments that show a large correspondence. In a factor analysis we furthermore demonstrate that various visual and data factors have effects on the preference for a certain type of visualization.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Line graphs are usually considered to be the best choice for visualizing time series data, whereas sometimes also scatter plots are used for showing main trends. So far there are no guidelines that indicate which of these visualization methods better display trends in time series for a given canvas. Assuming that the main information in a time series is its overall trend, we propose an algorithm that automatically picks the visualization method that reveals this trend best. This is achieved by measuring the visual consistency between the trend curve represented by a LOESS fit and the trend described by a scatter plot or a line graph. To measure the consistency between our algorithm and user choices, we performed an empirical study with a series of controlled experiments that show a large correspondence. In a factor analysis we furthermore demonstrate that various visual and data factors have effects on the preference for a certain type of visualization.", "title": "Line Graph or Scatter Plot? Automatic Selection of Methods for Visualizing Trends in Time Series", "normalizedTitle": "Line Graph or Scatter Plot? 
Automatic Selection of Methods for Visualizing Trends in Time Series", "fno": "07817898", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Market Research", "Time Series Analysis", "Data Visualization", "Visualization", "Bandwidth", "Kernel", "Estimation", "Line Graph", "Scatter Plot", "Time Series", "Trend" ], "authors": [ { "givenName": "Yunhai", "surname": "Wang", "fullName": "Yunhai Wang", "affiliation": "Shandong University, Jinan, China", "__typename": "ArticleAuthorType" }, { "givenName": "Fubo", "surname": "Han", "fullName": "Fubo Han", "affiliation": "Shandong University, Jinan, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lifeng", "surname": "Zhu", "fullName": "Lifeng Zhu", "affiliation": "Southeast University, Nanjing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Oliver", "surname": "Deussen", "fullName": "Oliver Deussen", "affiliation": "SIAT Shenzhen, University of Konstanz, Konstanz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Baoquan", "surname": "Chen", "fullName": "Baoquan Chen", "affiliation": "Shandong University, Jinan, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1141-1154", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-infovis/2003/2055/0/20550022", "title": "Coordinated Graph and Scatter-Plot Views for the Visual Exploration of Microarray Time-Series Data", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2003/20550022/12OmNwdL7k4", "parentPublication": { "id": "proceedings/ieee-infovis/2003/2055/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/hicss/2013/4892/0/4892b522", "title": "Enhancing Scatter Plots Using Ellipsoid Pixel Placement and Shading", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892b522/12OmNzwpUnq", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07536203", "title": "Multi-Granular Trend Detection for Time-Series Analysis", "doi": null, "abstractUrl": "/journal/tg/2017/01/07536203/13rRUIM2VBK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122809", "title": "Visualizing Student Histories Using Clustering and Composition", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122809/13rRUwI5TXx", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2015/10/07015603", "title": "Unsupervised Discovery of Subspace Trends", "doi": null, "abstractUrl": "/journal/tp/2015/10/07015603/13rRUxlgxUC", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122326", "title": "Selecting the Aspect Ratio of a Scatter Plot Based on Its Delaunay Triangulation", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122326/13rRUyuNswX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ai4i/2018/9209/0/08665695", "title": "Multi-Layer Nested 
Scatter Plot a Data Wrangling Method for Correlated Multi-Channel Time Series Signals", "doi": null, "abstractUrl": "/proceedings-article/ai4i/2018/08665695/18qc20o6UKI", "parentPublication": { "id": "proceedings/ai4i/2018/9209/0", "title": "2018 First International Conference on Artificial Intelligence for Industries (AI4I)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2018/6861/0/08802502", "title": "Time Series Projection to Highlight Trends and Outliers", "doi": null, "abstractUrl": "/proceedings-article/vast/2018/08802502/1cJ6YgVgISI", "parentPublication": { "id": "proceedings/vast/2018/6861/0", "title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a191", "title": "Visual Analytics for Analyzing Technological Trends from Text", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a191/1cMFbEv4BCE", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcdl/2019/1547/0/154700a438", "title": "ScholarSight: Visualizing Temporal Trends of Scientific Concepts", "doi": null, "abstractUrl": "/proceedings-article/jcdl/2019/154700a438/1ckrJmFVckM", "parentPublication": { "id": "proceedings/jcdl/2019/1547/0", "title": "2019 ACM/IEEE Joint Conference on Digital Libraries (JCDL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07829422", "articleId": "13rRUyYSWl6", "__typename": "AdjacentArticleType" }, "next": { "fno": "07829433", "articleId": "13rRUygBwhM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXnFuO", "name": 
"ttg201802-07817898s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201802-07817898s1.zip", "extension": "zip", "size": "16 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUygBwhM", "doi": "10.1109/TVCG.2017.2656897", "abstract": "In this paper we present a novel GPU-friendly real-time voxelization technique for rendering homogeneous media that is defined by particles, e.g., fluids obtained from particle-based simulations such as Smoothed Particle Hydrodynamics (SPH). Our method computes view-adaptive binary voxelizations with on-the-fly compression of a tiled perspective voxel grid, achieving higher resolutions than previous approaches. It allows for interactive generation of realistic images, enabling advanced rendering techniques such as ray casting-based refraction and reflection, light scattering and absorption, and ambient occlusion. In contrast to previous methods, it does not rely on preprocessing such as expensive, and often coarse, scalar field conversion or mesh generation steps. Our method directly takes unsorted particle data as input. It can be further accelerated by identifying fully populated simulation cells during simulation. The extracted surface can be filtered to achieve smooth surface appearance. Finally, we provide a new scheme for accelerated ray casting inside the voxelization.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we present a novel GPU-friendly real-time voxelization technique for rendering homogeneous media that is defined by particles, e.g., fluids obtained from particle-based simulations such as Smoothed Particle Hydrodynamics (SPH). Our method computes view-adaptive binary voxelizations with on-the-fly compression of a tiled perspective voxel grid, achieving higher resolutions than previous approaches. 
It allows for interactive generation of realistic images, enabling advanced rendering techniques such as ray casting-based refraction and reflection, light scattering and absorption, and ambient occlusion. In contrast to previous methods, it does not rely on preprocessing such as expensive, and often coarse, scalar field conversion or mesh generation steps. Our method directly takes unsorted particle data as input. It can be further accelerated by identifying fully populated simulation cells during simulation. The extracted surface can be filtered to achieve smooth surface appearance. Finally, we provide a new scheme for accelerated ray casting inside the voxelization.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we present a novel GPU-friendly real-time voxelization technique for rendering homogeneous media that is defined by particles, e.g., fluids obtained from particle-based simulations such as Smoothed Particle Hydrodynamics (SPH). Our method computes view-adaptive binary voxelizations with on-the-fly compression of a tiled perspective voxel grid, achieving higher resolutions than previous approaches. It allows for interactive generation of realistic images, enabling advanced rendering techniques such as ray casting-based refraction and reflection, light scattering and absorption, and ambient occlusion. In contrast to previous methods, it does not rely on preprocessing such as expensive, and often coarse, scalar field conversion or mesh generation steps. Our method directly takes unsorted particle data as input. It can be further accelerated by identifying fully populated simulation cells during simulation. The extracted surface can be filtered to achieve smooth surface appearance. 
Finally, we provide a new scheme for accelerated ray casting inside the voxelization.", "title": "Memory-Efficient On-the-Fly Voxelization and Rendering of Particle Data", "normalizedTitle": "Memory-Efficient On-the-Fly Voxelization and Rendering of Particle Data", "fno": "07829433", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Rendering Computer Graphics", "Surface Morphology", "Casting", "Computational Modeling", "Acceleration", "Graphics Processing Units", "Data Visualization", "Surface Extraction", "Interactive Particle Visualization", "Ray Tracing" ], "authors": [ { "givenName": "Tobias", "surname": "Zirr", "fullName": "Tobias Zirr", "affiliation": "Karlsruhe Institute of Technology, Karlsruhe, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Carsten", "surname": "Dachsbacher", "fullName": "Carsten Dachsbacher", "affiliation": "Karlsruhe Institute of Technology, Karlsruhe, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1155-1166", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icvrv/2014/6854/0/6854a424", "title": "The Study of the Terrain Rendering Method Based on Ray Casting", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2014/6854a424/12OmNAoUTx7", "parentPublication": { "id": "proceedings/icvrv/2014/6854/0", "title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2000/0478/0/04780197", "title": "Interactive Stereoscopic Rendering of Voxel-Based Terrain", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780197/12OmNBPc8wv", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": 
"Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2001/7200/0/7200westermann", "title": "Accelerated Volume Ray-Casting using Texture Mapping", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2001/7200westermann/12OmNCbU30D", "parentPublication": { "id": "proceedings/ieee-vis/2001/7200/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2004/2171/0/01309236", "title": "Efficient hardware voxelization", "doi": null, "abstractUrl": "/proceedings-article/cgi/2004/01309236/12OmNwNwzGm", "parentPublication": { "id": "proceedings/cgi/2004/2171/0", "title": "Proceedings. Computer Graphics International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/unesst/2015/9852/0/9852a018", "title": "Complexity Evaluation of CT-Images for GPU-Based Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/unesst/2015/9852a018/12OmNxeutee", "parentPublication": { "id": "proceedings/unesst/2015/9852/0", "title": "2015 8th International Conference on u- and e- Service, Science and Technology (UNESST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/uksim/2008/3114/0/3114a372", "title": "A Particle Modeling for Rendering Irregular Volumes", "doi": null, "abstractUrl": "/proceedings-article/uksim/2008/3114a372/12OmNyRg4uB", "parentPublication": { "id": "proceedings/uksim/2008/3114/0", "title": "Computer Modeling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prs/1993/4920/0/00586079", "title": "Segmented ray casting for data parallel volume rendering", "doi": null, "abstractUrl": "/proceedings-article/prs/1993/00586079/12OmNybfr4E", "parentPublication": { "id": 
"proceedings/prs/1993/4920/0", "title": "Proceedings of 1993 IEEE Parallel Rendering Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1997/8262/0/82620191", "title": "Accelerated volume rendering using homogeneous region encoding", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1997/82620191/12OmNzVoBzB", "parentPublication": { "id": "proceedings/ieee-vis/1997/8262/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/04/v0580", "title": "Tricubic Interpolation of Discrete Surfaces for Binary Volumes", "doi": null, "abstractUrl": "/journal/tg/2003/04/v0580/13rRUxBa5bG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/06/08341814", "title": "Fast Ray-Scene Intersection for Interactive Shadow Rendering with Thousands of Dynamic Lights", "doi": null, "abstractUrl": "/journal/tg/2019/06/08341814/13rRUxly8T5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07817898", "articleId": "13rRUzphDy2", "__typename": "AdjacentArticleType" }, "next": { "fno": "07814314", "articleId": "13rRUxYrbMm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRXl", "name": "ttg201802-07829433s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201802-07829433s1.zip", "extension": "zip", "size": "137 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxYrbMm", "doi": "10.1109/TVCG.2017.2648790", "abstract": "We present a novel data-driven approach to populate virtual road networks with realistic traffic flows. Specifically, given a limited set of vehicle trajectories as the input samples, our approach first synthesizes a large set of vehicle trajectories. By taking the spatio-temporal information of traffic flows as a 2D texture, the generation of new traffic flows can be formulated as a texture synthesis process, which is solved by minimizing a newly developed traffic texture energy. The synthesized output captures the spatio-temporal dynamics of the input traffic flows, and the vehicle interactions in it strictly follow traffic rules. After that, we position the synthesized vehicle trajectory data to virtual road networks using a cage-based registration scheme, where a few traffic-specific constraints are enforced to maintain each vehicle's original spatial location and synchronize its motion in concert with its neighboring vehicles. Our approach is intuitive to control and scalable to the complexity of virtual road networks. We validated our approach through many experiments and paired comparison user studies.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel data-driven approach to populate virtual road networks with realistic traffic flows. Specifically, given a limited set of vehicle trajectories as the input samples, our approach first synthesizes a large set of vehicle trajectories. 
By taking the spatio-temporal information of traffic flows as a 2D texture, the generation of new traffic flows can be formulated as a texture synthesis process, which is solved by minimizing a newly developed traffic texture energy. The synthesized output captures the spatio-temporal dynamics of the input traffic flows, and the vehicle interactions in it strictly follow traffic rules. After that, we position the synthesized vehicle trajectory data to virtual road networks using a cage-based registration scheme, where a few traffic-specific constraints are enforced to maintain each vehicle's original spatial location and synchronize its motion in concert with its neighboring vehicles. Our approach is intuitive to control and scalable to the complexity of virtual road networks. We validated our approach through many experiments and paired comparison user studies.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel data-driven approach to populate virtual road networks with realistic traffic flows. Specifically, given a limited set of vehicle trajectories as the input samples, our approach first synthesizes a large set of vehicle trajectories. By taking the spatio-temporal information of traffic flows as a 2D texture, the generation of new traffic flows can be formulated as a texture synthesis process, which is solved by minimizing a newly developed traffic texture energy. The synthesized output captures the spatio-temporal dynamics of the input traffic flows, and the vehicle interactions in it strictly follow traffic rules. After that, we position the synthesized vehicle trajectory data to virtual road networks using a cage-based registration scheme, where a few traffic-specific constraints are enforced to maintain each vehicle's original spatial location and synchronize its motion in concert with its neighboring vehicles. Our approach is intuitive to control and scalable to the complexity of virtual road networks. 
We validated our approach through many experiments and paired comparison user studies.", "title": "Realistic Data-Driven Traffic Flow Animation Using Texture Synthesis", "normalizedTitle": "Realistic Data-Driven Traffic Flow Animation Using Texture Synthesis", "fno": "07814314", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Vehicles", "Roads", "Trajectory", "Animation", "Two Dimensional Displays", "Solid Modeling", "Virtual Environments", "Traffic Flow Animation", "Crowd Simulation", "Data Driven Method", "Texture Synthesis" ], "authors": [ { "givenName": "Qianwen", "surname": "Chao", "fullName": "Qianwen Chao", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, P.R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Zhigang", "surname": "Deng", "fullName": "Zhigang Deng", "affiliation": "Computer Science Department, University of Houston, Houston, TX", "__typename": "ArticleAuthorType" }, { "givenName": "Jiaping", "surname": "Ren", "fullName": "Jiaping Ren", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, P.R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Qianqian", "surname": "Ye", "fullName": "Qianqian Ye", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, P.R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaogang", "surname": "Jin", "fullName": "Xiaogang Jin", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, P.R. 
China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1167-1178", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/mass/2016/2833/0/2833a281", "title": "TOP: Vehicle Trajectory Based Driving Speed Optimization Strategy for Travel Time Minimization and Road Congestion Avoidance", "doi": null, "abstractUrl": "/proceedings-article/mass/2016/2833a281/12OmNBTawzG", "parentPublication": { "id": "proceedings/mass/2016/2833/0", "title": "2016 IEEE 13th International Conference on Mobile Ad Hoc and Sensor Systems (MASS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccps/2013/1996/0/06603999", "title": "Collision free autonomous ground traffic: A model predictive control approach", "doi": null, "abstractUrl": "/proceedings-article/iccps/2013/06603999/12OmNrFkeVE", "parentPublication": { "id": "proceedings/iccps/2013/1996/0", "title": "2013 ACM/IEEE International Conference on Cyber-Physical Systems (ICCPS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2008/3508/2/3508b415", "title": "Short-Term Traffic Flow Prediction Methods and the Correlation Analysis of Vehicle Speed and Traffic Flow", "doi": null, "abstractUrl": "/proceedings-article/cis/2008/3508b415/12OmNrJAej7", "parentPublication": { "id": "proceedings/cis/2008/3508/2", "title": "2008 International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2004/2244/0/01410481", "title": "SVR-based facial texture driving for realistic expression synthesis", "doi": null, "abstractUrl": 
"/proceedings-article/icig/2004/01410481/12OmNx7XH4O", "parentPublication": { "id": "proceedings/icig/2004/2244/0", "title": "Proceedings. Third International Conference on Image and Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2009/3890/0/3890a257", "title": "A 3-D Traffic Animation System with Storm Surge Response", "doi": null, "abstractUrl": "/proceedings-article/ism/2009/3890a257/12OmNyGtje0", "parentPublication": { "id": "proceedings/ism/2009/3890/0", "title": "2009 11th IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcs/2012/4685/0/4685a142", "title": "NEAT: Road Network Aware Trajectory Clustering", "doi": null, "abstractUrl": "/proceedings-article/icdcs/2012/4685a142/12OmNz5JBOu", "parentPublication": { "id": "proceedings/icdcs/2012/4685/0", "title": "2012 IEEE 32nd International Conference on Distributed Computing Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2015/7143/0/7143a051", "title": "A Novel Traffic Flow Detection Method Using Multiple Statistical Parameters", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2015/7143a051/12OmNzn38Ls", "parentPublication": { "id": "proceedings/icmtma/2015/7143/0", "title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2015/02/06589570", "title": "Road-Network Aware Trajectory Clustering: Integrating Locality, Flow, and Density", "doi": null, "abstractUrl": "/journal/tm/2015/02/06589570/13rRUyeTViG", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2022/8045/0/10020759", 
"title": "Learning Latent Road Correlations from Trajectories", "doi": null, "abstractUrl": "/proceedings-article/big-data/2022/10020759/1KfTaomjtw4", "parentPublication": { "id": "proceedings/big-data/2022/8045/0", "title": "2022 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/03/08827957", "title": "Visual Cause Analytics for Traffic Congestion", "doi": null, "abstractUrl": "/journal/tg/2021/03/08827957/1ddbhyEXGWA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07829433", "articleId": "13rRUygBwhM", "__typename": "AdjacentArticleType" }, "next": { "fno": "07792706", "articleId": "13rRUxDqS8m", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRQX", "name": "ttg201802-07814314s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201802-07814314s1.zip", "extension": "zip", "size": "131 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxDqS8m", "doi": "10.1109/TVCG.2016.2642958", "abstract": "With the rapidly growing VR industry, in recent years, more and more attention has been paid for fire sound synthesis. However, previous methods usually ignore the influences of the different solid combustibles, leading to unrealistic sounding results. This paper proposes SSC (sounding solid combustibles), which is a new recording-driven non-premixed flame sound synthesis framework accounting for different solid combustibles. SSC consists of three components: combustion noise, vortex noise and popping sounds. The popping sounds are the keys to distinguish the differences of solid combustibles. To improve the quality of fire sound, we extract the features of popping sounds from the real fire sound examples based on modified Empirical Mode Decomposition (EMD) method. Unlike previous methods, we take both direct combustion noise and vortex noise into account because the fire model is non-premixed flame. In our method, we also greatly resolve the synchronization problem during blending the three components of SSC. Due to the introduction of the popping sounds, it is easy to distinguish the fire sounds of different solid combustibles by our method, with great potential in practical applications such as games, VR system, etc. Various experiments and comparisons are presented to validate our method.", "abstracts": [ { "abstractType": "Regular", "content": "With the rapidly growing VR industry, in recent years, more and more attention has been paid for fire sound synthesis. However, previous methods usually ignore the influences of the different solid combustibles, leading to unrealistic sounding results. 
This paper proposes SSC (sounding solid combustibles), which is a new recording-driven non-premixed flame sound synthesis framework accounting for different solid combustibles. SSC consists of three components: combustion noise, vortex noise and popping sounds. The popping sounds are the keys to distinguish the differences of solid combustibles. To improve the quality of fire sound, we extract the features of popping sounds from the real fire sound examples based on modified Empirical Mode Decomposition (EMD) method. Unlike previous methods, we take both direct combustion noise and vortex noise into account because the fire model is non-premixed flame. In our method, we also greatly resolve the synchronization problem during blending the three components of SSC. Due to the introduction of the popping sounds, it is easy to distinguish the fire sounds of different solid combustibles by our method, with great potential in practical applications such as games, VR system, etc. Various experiments and comparisons are presented to validate our method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the rapidly growing VR industry, in recent years, more and more attention has been paid for fire sound synthesis. However, previous methods usually ignore the influences of the different solid combustibles, leading to unrealistic sounding results. This paper proposes SSC (sounding solid combustibles), which is a new recording-driven non-premixed flame sound synthesis framework accounting for different solid combustibles. SSC consists of three components: combustion noise, vortex noise and popping sounds. The popping sounds are the keys to distinguish the differences of solid combustibles. To improve the quality of fire sound, we extract the features of popping sounds from the real fire sound examples based on modified Empirical Mode Decomposition (EMD) method. 
Unlike previous methods, we take both direct combustion noise and vortex noise into account because the fire model is non-premixed flame. In our method, we also greatly resolve the synchronization problem during blending the three components of SSC. Due to the introduction of the popping sounds, it is easy to distinguish the fire sounds of different solid combustibles by our method, with great potential in practical applications such as games, VR system, etc. Various experiments and comparisons are presented to validate our method.", "title": "Sounding Solid Combustibles: Non-Premixed Flame Sound Synthesis for Different Solid Combustibles", "normalizedTitle": "Sounding Solid Combustibles: Non-Premixed Flame Sound Synthesis for Different Solid Combustibles", "fno": "07792706", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Chemically Reactive Flow", "Combustion", "Flames", "Noise", "Vortices", "Sounding Solid Combustibles", "Vortex Noise", "Popping Sounds", "Direct Combustion Noise", "Fire Sound Synthesis", "Nonpremixed Flame Sound Synthesis", "Modified Empirical Mode Decomposition Method", "Fire Model", "Combustion", "Solids", "Synchronization", "Mathematical Model", "Animation", "Heating", "Bandwidth", "Non Premixed Fire Sound", "Solid Combustibles", "Direct Combustion Noise", "Vortex Noise", "Popping Sounds" ], "authors": [ { "givenName": "Qiang", "surname": "Yin", "fullName": "Qiang Yin", "affiliation": "School of Computer Science and Technology, Tianjin University, Tianjin, China", "__typename": "ArticleAuthorType" }, { "givenName": "Shiguang", "surname": "Liu", "fullName": "Shiguang Liu", "affiliation": "School of Computer Science and Technology, Tianjin University, Tianjin, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1179-1189", "year": "2018", "issn": "1077-2626", "isbn": 
null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cesce/2010/3972/1/3972a402", "title": "SEM-EDS Analysis of Fly Ash from One Shanghai Municipal Solid Waste Incineration Plant during Heating Process", "doi": null, "abstractUrl": "/proceedings-article/cesce/2010/3972a402/12OmNApcuEN", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2013/4893/0/06455228", "title": "Thermal Conductivity Measurement about Fluid and Solid", "doi": null, "abstractUrl": "/proceedings-article/isdea/2013/06455228/12OmNApcuwI", "parentPublication": { "id": "proceedings/isdea/2013/4893/0", "title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pbg/2005/20/0/01500327", "title": "A unified Lagrangian approach to solid-fluid animation", "doi": null, "abstractUrl": "/proceedings-article/pbg/2005/01500327/12OmNvD8RFL", "parentPublication": { "id": "proceedings/pbg/2005/20/0", "title": "Point-Based Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgames/2013/0820/0/06632621", "title": "Real-time rendering of burning solid objects in video games", "doi": null, "abstractUrl": "/proceedings-article/cgames/2013/06632621/12OmNyeECBH", "parentPublication": { "id": "proceedings/cgames/2013/0820/0", "title": "2013 18th International Conference on Computer Games: AI, Animation, Mobile, Interactive Multimedia, Educational & Serious Games (CGAMES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2014/6636/0/6636a252", "title": "Experimental Study on Total Flooding Extinguishing Test by New Ultra-fine Water Mist Fire Extinguisher Extinguishing 
Combustion of Paper", "doi": null, "abstractUrl": "/proceedings-article/icicta/2014/6636a252/12OmNzvz6H6", "parentPublication": { "id": "proceedings/icicta/2014/6636/0", "title": "2014 7th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/08/06171181", "title": "A Multigrid Fluid Pressure Solver Handling Separating Solid Boundary Conditions", "doi": null, "abstractUrl": "/journal/tg/2012/08/06171181/13rRUxlgxTi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/07/07448467", "title": "Adaptive Skinning for Interactive Hair-Solid Simulation", "doi": null, "abstractUrl": "/journal/tg/2017/07/07448467/13rRUygBw7e", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2020/4380/0/438000b546", "title": "Prediction of high Precision Ignition Process in the Condition of Multi Flow Area Coupling of Micro Solid Rocket Motor", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2020/438000b546/1r54fSFu4GA", "parentPublication": { "id": "proceedings/trustcom/2020/4380/0", "title": "2020 IEEE 19th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07814314", "articleId": "13rRUxYrbMm", "__typename": "AdjacentArticleType" }, "next": { "fno": "07833201", "articleId": "13rRUx0gezW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRGD", "name": "ttg201802-07792706s1.zip", "location": 
"https://www.computer.org/csdl/api/v1/extra/ttg201802-07792706s1.zip", "extension": "zip", "size": "54.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0gezW", "doi": "10.1109/TVCG.2017.2657766", "abstract": "Handheld scanning using commodity depth cameras provides a flexible and low-cost manner to get 3D models. The existing methods scan a target by densely fusing all the captured depth images, yet most frames are redundant. The jittering frames inevitably embedded in handheld scanning process will cause feature blurring on the reconstructed model and even trigger the scan failure (i.e., camera tracking losing). To address these problems, in this paper, we propose a novel sparse-sequence fusion (SSF) algorithm for handheld scanning using commodity depth cameras. It first extracts related measurements for analyzing camera motion. Then based on these measurements, we progressively construct a supporting subset for the captured depth image sequence to decrease the data redundancy and the interference from jittering frames. Since SSF will reveal the intrinsic heavy noise of the original depth images, our method introduces a refinement process to eliminate the raw noise and recover geometric features for the depth images selected into the supporting subset. We finally obtain the fused result by integrating the refined depth images into the truncated signed distance field (TSDF) of the target. Multiple comparison experiments are conducted and the results verify the feasibility and validity of SSF for handheld scanning with a commodity depth camera.", "abstracts": [ { "abstractType": "Regular", "content": "Handheld scanning using commodity depth cameras provides a flexible and low-cost manner to get 3D models. 
The existing methods scan a target by densely fusing all the captured depth images, yet most frames are redundant. The jittering frames inevitably embedded in handheld scanning process will cause feature blurring on the reconstructed model and even trigger the scan failure (i.e., camera tracking losing). To address these problems, in this paper, we propose a novel sparse-sequence fusion (SSF) algorithm for handheld scanning using commodity depth cameras. It first extracts related measurements for analyzing camera motion. Then based on these measurements, we progressively construct a supporting subset for the captured depth image sequence to decrease the data redundancy and the interference from jittering frames. Since SSF will reveal the intrinsic heavy noise of the original depth images, our method introduces a refinement process to eliminate the raw noise and recover geometric features for the depth images selected into the supporting subset. We finally obtain the fused result by integrating the refined depth images into the truncated signed distance field (TSDF) of the target. Multiple comparison experiments are conducted and the results verify the feasibility and validity of SSF for handheld scanning with a commodity depth camera.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Handheld scanning using commodity depth cameras provides a flexible and low-cost manner to get 3D models. The existing methods scan a target by densely fusing all the captured depth images, yet most frames are redundant. The jittering frames inevitably embedded in handheld scanning process will cause feature blurring on the reconstructed model and even trigger the scan failure (i.e., camera tracking losing). To address these problems, in this paper, we propose a novel sparse-sequence fusion (SSF) algorithm for handheld scanning using commodity depth cameras. It first extracts related measurements for analyzing camera motion. 
Then based on these measurements, we progressively construct a supporting subset for the captured depth image sequence to decrease the data redundancy and the interference from jittering frames. Since SSF will reveal the intrinsic heavy noise of the original depth images, our method introduces a refinement process to eliminate the raw noise and recover geometric features for the depth images selected into the supporting subset. We finally obtain the fused result by integrating the refined depth images into the truncated signed distance field (TSDF) of the target. Multiple comparison experiments are conducted and the results verify the feasibility and validity of SSF for handheld scanning with a commodity depth camera.", "title": "Surface Reconstruction via Fusing Sparse-Sequence of Depth Images", "normalizedTitle": "Surface Reconstruction via Fusing Sparse-Sequence of Depth Images", "fno": "07833201", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cameras", "Image Reconstruction", "Surface Reconstruction", "Three Dimensional Displays", "Image Sequences", "Image Segmentation", "Solid Modeling", "Depth Image Refinement", "Handheld Scanning", "Sparse Sequence Fusion", "Surface Reconstruction", "Supporting Subset" ], "authors": [ { "givenName": "Long", "surname": "Yang", "fullName": "Long Yang", "affiliation": "Computer School, Wuhan University, Wuhan, Hubei, China", "__typename": "ArticleAuthorType" }, { "givenName": "Qingan", "surname": "Yan", "fullName": "Qingan Yan", "affiliation": "State Key Lab of Software Engineering, Computer School, Wuhan University, Wuhan, Hubei, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yanping", "surname": "Fu", "fullName": "Yanping Fu", "affiliation": "State Key Lab of Software Engineering, Computer School, Wuhan University, Wuhan, Hubei, China", "__typename": "ArticleAuthorType" }, { "givenName": "Chunxia", "surname": "Xiao", "fullName": "Chunxia Xiao", "affiliation": "State Key Lab of Software Engineering, Computer 
School, Wuhan University, Wuhan, Hubei, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1190-1203", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/msn/2013/5159/0/06726381", "title": "Depth Mapping Using the Hierarchical Reconstruction of Multiple Sequence", "doi": null, "abstractUrl": "/proceedings-article/msn/2013/06726381/12OmNClQ0Bv", "parentPublication": { "id": "proceedings/msn/2013/5159/0", "title": "2013 Ninth International Conference on Mobile Ad-hoc and Sensor Networks (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smartcomp/2014/5711/0/07043853", "title": "Enabling 3D online shopping with affordable depth scanned models", "doi": null, "abstractUrl": "/proceedings-article/smartcomp/2014/07043853/12OmNwHz07o", "parentPublication": { "id": "proceedings/smartcomp/2014/5711/0", "title": "2014 International Conference on Smart Computing (SMARTCOMP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2017/2610/0/261001a057", "title": "OctNetFusion: Learning Depth Fusion from Data", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a057/12OmNxFJXuz", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2014/4311/0/4311a055", "title": "When Specular Object Meets RGB-D Camera 3D Scanning: Color Image Plus Fragmented Depth Map", "doi": null, "abstractUrl": "/proceedings-article/ism/2014/4311a055/12OmNyUWR8A", "parentPublication": { "id": "proceedings/ism/2014/4311/0", "title": 
"2014 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcabes/2015/6593/0/6593a352", "title": "Quick Capture and Reconstruction for 3D Head", "doi": null, "abstractUrl": "/proceedings-article/dcabes/2015/6593a352/12OmNyUnEKB", "parentPublication": { "id": "proceedings/dcabes/2015/6593/0", "title": "2015 14th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032a910", "title": "BodyFusion: Real-Time Capture of Human Motion and Surface Geometry Using a Single Depth Camera", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032a910/12OmNzT7Otl", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761015", "title": "Usage of needle maps and shadows to overcome depth edges in depth map reconstruction", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761015/12OmNzkuKKM", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/05/ttp2013051039", "title": "Algorithms for 3D Shape Scanning with a Depth Camera", "doi": null, "abstractUrl": "/journal/tp/2013/05/ttp2013051039/13rRUxNW1UZ", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a012", "title": "Surface Light Field Fusion", "doi": null, "abstractUrl": 
"/proceedings-article/3dv/2018/842500a012/17D45WODasr", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/02/09184024", "title": "GeoNet++: Iterative Geometric Neural Network with Edge-Aware Refinement for Joint Depth and Surface Normal Estimation", "doi": null, "abstractUrl": "/journal/tp/2022/02/09184024/1mLHVYnhWko", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07792706", "articleId": "13rRUxDqS8m", "__typename": "AdjacentArticleType" }, "next": { "fno": "07862917", "articleId": "13rRUwInvsY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgIN", "name": "ttg201802-07833201s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201802-07833201s1.zip", "extension": "zip", "size": "14.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwInvsY", "doi": "10.1109/TVCG.2017.2672987", "abstract": "Analyzing high-dimensional data and finding hidden patterns is a difficult problem and has attracted numerous research efforts. Automated methods can be useful to some extent but bringing the data analyst into the loop via interactive visual tools can help the discovery process tremendously. An inherent problem in this effort is that humans lack the mental capacity to truly understand spaces exceeding three spatial dimensions. To keep within this limitation, we describe a framework that decomposes a high-dimensional data space into a continuum of generalized 3D subspaces. Analysts can then explore these 3D subspaces individually via the familiar trackball interface while using additional facilities to smoothly transition to adjacent subspaces for expanded space comprehension. Since the number of such subspaces suffers from combinatorial explosion, we provide a set of data-driven subspace selection and navigation tools which can guide users to interesting subspaces and views. A subspace trail map allows users to manage the explored subspaces, keep their bearings, and return to interesting subspaces and views. Both trackball and trail map are each embedded into a word cloud of attribute labels which aid in navigation. We demonstrate our system via several use cases in a diverse set of application areas—cluster analysis and refinement, information discovery, and supervised training of classifiers. 
We also report on a user study that evaluates the usability of the various interactions our system provides.", "abstracts": [ { "abstractType": "Regular", "content": "Analyzing high-dimensional data and finding hidden patterns is a difficult problem and has attracted numerous research efforts. Automated methods can be useful to some extent but bringing the data analyst into the loop via interactive visual tools can help the discovery process tremendously. An inherent problem in this effort is that humans lack the mental capacity to truly understand spaces exceeding three spatial dimensions. To keep within this limitation, we describe a framework that decomposes a high-dimensional data space into a continuum of generalized 3D subspaces. Analysts can then explore these 3D subspaces individually via the familiar trackball interface while using additional facilities to smoothly transition to adjacent subspaces for expanded space comprehension. Since the number of such subspaces suffers from combinatorial explosion, we provide a set of data-driven subspace selection and navigation tools which can guide users to interesting subspaces and views. A subspace trail map allows users to manage the explored subspaces, keep their bearings, and return to interesting subspaces and views. Both trackball and trail map are each embedded into a word cloud of attribute labels which aid in navigation. We demonstrate our system via several use cases in a diverse set of application areas—cluster analysis and refinement, information discovery, and supervised training of classifiers. We also report on a user study that evaluates the usability of the various interactions our system provides.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Analyzing high-dimensional data and finding hidden patterns is a difficult problem and has attracted numerous research efforts. 
Automated methods can be useful to some extent but bringing the data analyst into the loop via interactive visual tools can help the discovery process tremendously. An inherent problem in this effort is that humans lack the mental capacity to truly understand spaces exceeding three spatial dimensions. To keep within this limitation, we describe a framework that decomposes a high-dimensional data space into a continuum of generalized 3D subspaces. Analysts can then explore these 3D subspaces individually via the familiar trackball interface while using additional facilities to smoothly transition to adjacent subspaces for expanded space comprehension. Since the number of such subspaces suffers from combinatorial explosion, we provide a set of data-driven subspace selection and navigation tools which can guide users to interesting subspaces and views. A subspace trail map allows users to manage the explored subspaces, keep their bearings, and return to interesting subspaces and views. Both trackball and trail map are each embedded into a word cloud of attribute labels which aid in navigation. We demonstrate our system via several use cases in a diverse set of application areas—cluster analysis and refinement, information discovery, and supervised training of classifiers. 
We also report on a user study that evaluates the usability of the various interactions our system provides.", "title": "The Subspace Voyager: Exploring High-Dimensional Data along a Continuum of Salient 3D Subspaces", "normalizedTitle": "The Subspace Voyager: Exploring High-Dimensional Data along a Continuum of Salient 3D Subspaces", "fno": "07862917", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Three Dimensional Displays", "Visualization", "Space Exploration", "Two Dimensional Displays", "Navigation", "Cognition", "Layout", "High Dimensional Data", "Subspace Navigation", "Trackball", "PCA", "Ant Colony Optimization 1" ], "authors": [ { "givenName": "Bing", "surname": "Wang", "fullName": "Bing Wang", "affiliation": "Visual Analytics and Imaging Lab at the Computer Science Department, Stony Brook University, Stony Brook, NY", "__typename": "ArticleAuthorType" }, { "givenName": "Klaus", "surname": "Mueller", "fullName": "Klaus Mueller", "affiliation": "Visual Analytics and Imaging Lab at the Computer Science Department, Stony Brook University, Stony Brook, NY", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1204-1222", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457b513", "title": "DUST: Dual Union of Spatio-Temporal Subspaces for Monocular Multiple Object 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457b513/12OmNBrlPyw", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459302", "title": "FLoSS: Facility location 
for subspace segmentation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459302/12OmNsbGvFi", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459288", "title": "The Normalized Subspace Inclusion: Robust clustering of motion subspaces", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459288/12OmNyugyH8", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890178", "title": "Local subspace video stabilization", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890178/12OmNzUPpna", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995679", "title": "Graph connectivity in sparse subspace clustering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995679/12OmNzayNpU", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2015/11/07053916", "title": "Difference Subspace and Its Generalization for Subspace-Based Methods", "doi": null, "abstractUrl": "/journal/tp/2015/11/07053916/13rRUytF42H", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10068257", "title": "Interactive Subspace Cluster Analysis Guided by 
Semantic Attribute Associations", "doi": null, "abstractUrl": "/journal/tg/5555/01/10068257/1LtR7CeyeHe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/10/09067086", "title": "Multilinear Modelling of Faces and Expressions", "doi": null, "abstractUrl": "/journal/tp/2021/10/09067086/1j1lrPUsq8E", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2019/5584/0/558400a722", "title": "A New Method for Fraud Detection in Credit Cards Based on Transaction Dynamics in Subspaces", "doi": null, "abstractUrl": "/proceedings-article/csci/2019/558400a722/1jdE0pU6SwU", "parentPublication": { "id": "proceedings/csci/2019/5584/0", "title": "2019 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222353", "title": "Implicit Multidimensional Projection of Local Subspaces", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222353/1nTqcxPMEIE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07833201", "articleId": "13rRUx0gezW", "__typename": "AdjacentArticleType" }, "next": { "fno": "08241875", "articleId": "13rRUxYIMV8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgE1", "name": "ttg201802-07862917s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201802-07862917s1.zip", "extension": "zip", "size": "17.5 MB", "__typename": "WebExtraType" } ], 
"articleVideos": [] }
{ "issue": { "id": "12OmNvTBB89", "title": "Feb.", "year": "2018", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxYIMV8", "doi": "10.1109/TVCG.2017.2779098", "abstract": null, "abstracts": [], "normalizedAbstract": null, "title": "2017 Index IEEE Transactions on Visualization and Computer Graphics Vol. 23", "normalizedTitle": "2017 Index IEEE Transactions on Visualization and Computer Graphics Vol. 23", "fno": "08241875", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "02", "pubDate": "2018-02-01 00:00:00", "pubType": "trans", "pages": "1223-1251", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "07862917", "articleId": "13rRUwInvsY", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUzpzeAX", "doi": "10.1109/TVCG.2008.13", "abstract": "Presents the table of contents for this issue of the periodical.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the table of contents for this issue of the periodical.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the table of contents for this issue of the periodical.", "title": "[Front cover]", "normalizedTitle": "[Front cover]", "fno": "04435109", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "c1", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": null, "next": { "fno": "04435110", "articleId": "13rRUwI5U7V", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwI5U7V", "doi": "10.1109/TVCG.2008.14", "abstract": "Provides a listing of current committee members and society officers.", "abstracts": [ { "abstractType": "Regular", "content": "Provides a listing of current committee members and society officers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Provides a listing of current committee members and society officers.", "title": "[Inside front cover]", "normalizedTitle": "[Inside front cover]", "fno": "04435110", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "c2", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "04435109", "articleId": "13rRUzpzeAX", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020313", "articleId": "13rRUxZRbnW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxZRbnW", "doi": "10.1109/TVCG.2007.70429", "abstract": "We introduce a family of box splines for efficient, accurate, and smooth reconstruction of volumetric data sampled on the body-centered cubic (BCC) lattice, which is the favorable volumetric sampling pattern due to its optimal spectral sphere packing property. First, we construct a box spline based on the four principal directions of the BCC lattice that allows for a linear C<sup>0</sup> reconstruction. Then, the design is extended for higher degrees of continuity. We derive the explicit piecewise polynomial representations of the C<sup>0</sup> and C<sup>2</sup> box splines that are useful for practical reconstruction applications. We further demonstrate that approximation in the shift-invariant space - generated by BCC-lattice shifts of these box splines - is twice as efficient as using the tensor-product B-spline solutions on the Cartesian lattice (with comparable smoothness and approximation order and with the same sampling density). Practical evidence is provided demonstrating that the BCC lattice not only is generally a more accurate sampling pattern, but also allows for extremely efficient reconstructions that outperform tensor-product Cartesian reconstructions.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce a family of box splines for efficient, accurate, and smooth reconstruction of volumetric data sampled on the body-centered cubic (BCC) lattice, which is the favorable volumetric sampling pattern due to its optimal spectral sphere packing property. 
First, we construct a box spline based on the four principal directions of the BCC lattice that allows for a linear C<sup>0</sup> reconstruction. Then, the design is extended for higher degrees of continuity. We derive the explicit piecewise polynomial representations of the C<sup>0</sup> and C<sup>2</sup> box splines that are useful for practical reconstruction applications. We further demonstrate that approximation in the shift-invariant space - generated by BCC-lattice shifts of these box splines - is twice as efficient as using the tensor-product B-spline solutions on the Cartesian lattice (with comparable smoothness and approximation order and with the same sampling density). Practical evidence is provided demonstrating that the BCC lattice not only is generally a more accurate sampling pattern, but also allows for extremely efficient reconstructions that outperform tensor-product Cartesian reconstructions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce a family of box splines for efficient, accurate, and smooth reconstruction of volumetric data sampled on the body-centered cubic (BCC) lattice, which is the favorable volumetric sampling pattern due to its optimal spectral sphere packing property. First, we construct a box spline based on the four principal directions of the BCC lattice that allows for a linear C0 reconstruction. Then, the design is extended for higher degrees of continuity. We derive the explicit piecewise polynomial representations of the C0 and C2 box splines that are useful for practical reconstruction applications. We further demonstrate that approximation in the shift-invariant space - generated by BCC-lattice shifts of these box splines - is twice as efficient as using the tensor-product B-spline solutions on the Cartesian lattice (with comparable smoothness and approximation order and with the same sampling density). 
Practical evidence is provided demonstrating that the BCC lattice not only is generally a more accurate sampling pattern, but also allows for extremely efficient reconstructions that outperform tensor-product Cartesian reconstructions.", "title": "Practical Box Splines for Reconstruction on the Body Centered Cubic Lattice", "normalizedTitle": "Practical Box Splines for Reconstruction on the Body Centered Cubic Lattice", "fno": "ttg2008020313", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Geometry", "Splines Mathematics", "Box Splines", "Body Centered Cubic Lattice Reconstruction", "Volumetric Data Reconstruction", "Volumetric Sampling Pattern", "Explicit Piecewise Polynomial Representations", "Cartesian Lattice", "Lattices", "Sampling Methods", "Image Reconstruction", "FCC", "Frequency Domain Analysis", "Signal Sampling", "Signal Processing", "Spline", "Kernel", "Signal Design", "Spline And Piecewise Polynomial Approximation", "Spline And Piecewise Polynomial Interpolation", "Splines", "Signal Processing", "Reconstruction", "Finite Volume Methods", "Spline And Piecewise Polynomial Approximation", "Spline And Piecewise Polynomial Interpolation", "Splines", "Signal Processing", "Reconstruction", "Finite Volume Methods" ], "authors": [ { "givenName": "Alireza", "surname": "Entezari", "fullName": "Alireza Entezari", "affiliation": "Simon Fraser Univ., Burnaby", "__typename": "ArticleAuthorType" }, { "givenName": "Dimitri", "surname": "Van De Ville", "fullName": "Dimitri Van De Ville", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Torsten", "surname": "Moller", "fullName": "Torsten Moller", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "313-328", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2005/2766/0/27660040", "title": "Prefiltered Gaussian Reconstruction for High-Quality Rendering of Volumetric Data sampled on a Body-Centered Cubic Grid", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660040/12OmNAfy7Ky", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2015/8020/0/07450400", "title": "Hierarchical Box Splines", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2015/07450400/12OmNqI04Q6", "parentPublication": { "id": "proceedings/cad-graphics/2015/8020/0", "title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/1996/7518/0/75180220", "title": "Generalized Geometric Cubic Splines", "doi": null, "abstractUrl": "/proceedings-article/cgi/1996/75180220/12OmNrkjVdG", "parentPublication": { "id": "proceedings/cgi/1996/7518/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880011", "title": "Linear and Cubic Box Splines for the Body Centered Cubic Lattice", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880011/12OmNvAiScO", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532810", "title": "Prefiltered Gaussian reconstruction for high-quality rendering of volumetric data sampled on a body-centered cubic grid", "doi": null, "abstractUrl": 
"/proceedings-article/ieee-vis/2005/01532810/12OmNwFzO0f", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/03/ttg2010030499", "title": "An Evaluation of Prefiltered B-Spline Reconstruction for Quasi-Interpolation on the Body-Centered Cubic Lattice", "doi": null, "abstractUrl": "/journal/tg/2010/03/ttg2010030499/13rRUEgarBn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/09/ttg2013091455", "title": "Cosine-Weighted B-Spline Interpolation: A Fast and High-Quality Reconstruction Scheme for the Body-Centered Cubic Lattice", "doi": null, "abstractUrl": "/journal/tg/2013/09/ttg2013091455/13rRUx0xPTS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/02/ttg2013020319", "title": "Quartic Box-Spline Reconstruction on the BCC Lattice", "doi": null, "abstractUrl": "/journal/tg/2013/02/ttg2013020319/13rRUxC0SvT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1337", "title": "Extensions of the Zwart-Powell Box Spline for Volumetric Data Reconstruction on the Cartesian Lattice", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1337/13rRUxjQybK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/06/ttg2008061523", "title": "Box Spline Reconstruction On The Face-Centered Cubic 
Lattice", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061523/13rRUy0qnLC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "04435110", "articleId": "13rRUwI5U7V", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020245", "articleId": "13rRUxYrbM7", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxYrbM7", "doi": "10.1109/TVCG.2008.17", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "title": "Editor's Note", "normalizedTitle": "Editor's Note", "fno": "ttg2008020245", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Thomas", "surname": "Ertl", "fullName": "Thomas Ertl", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "245", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2011/05/ttg2011050553", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050553/13rRUB7a1fM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/02/07000011", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/tg/2015/02/07000011/13rRUEgarBx", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/06/ttg2013060897", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/tg/2013/06/ttg2013060897/13rRUIM2VBG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization 
& Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/1996/08/t0865", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/tc/1996/08/t0865/13rRUNvgyVd", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/01/ttp2013010003", "title": "Editor's note", "doi": null, "abstractUrl": "/journal/tp/2013/01/ttp2013010003/13rRUx0gew9", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2009/02/ttd2009020145", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/td/2009/02/ttd2009020145/13rRUxASu0n", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/02/v0188", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/tg/2003/02/v0188/13rRUxBa5rK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2004/04/01268394", "title": "Editor's note", "doi": null, "abstractUrl": "/journal/tc/2004/04/01268394/13rRUxZRbnc", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/05/06776318", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/tg/2014/05/06776318/13rRUy2YLYw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "trans/tg/1999/03/v0193", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/tg/1999/03/v0193/13rRUynHuiU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020313", "articleId": "13rRUxZRbnW", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020246", "articleId": "13rRUx0xPZs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0xPZs", "doi": "10.1109/TVCG.2007.1069", "abstract": "We present a system for constructing 3D models of real-world objects with optically challenging surfaces. The system utilizes a new range imaging concept called multi-peak range imaging, which stores multiple candidates of range measurements for each point on the object surface. The multiple measurements include the erroneous range data caused by various surface properties that are not ideal for structured-light range sensing. False measurements generated by spurious reflections are eliminated by applying a series of constraint tests. The constraint tests based on local surface and local sensor visibility are applied first to individual range images. The constraint tests based on global consistency of coordinates and visibility are then applied to all range images acquired from different viewpoints. We show the effectiveness of our method by constructing 3D models of five different optically challenging objects. To evaluate the performance of the constraint tests and to examine the effects of the parameters used in the constraint tests, we acquired the ground truth data by painting those objects to suppress the surface-related properties that cause difficulties in range sensing. Experimental results indicate that our method significantly improves upon the traditional methods for constructing reliable 3D models of optically challenging objects.", "abstracts": [ { "abstractType": "Regular", "content": "We present a system for constructing 3D models of real-world objects with optically challenging surfaces. 
The system utilizes a new range imaging concept called multi-peak range imaging, which stores multiple candidates of range measurements for each point on the object surface. The multiple measurements include the erroneous range data caused by various surface properties that are not ideal for structured-light range sensing. False measurements generated by spurious reflections are eliminated by applying a series of constraint tests. The constraint tests based on local surface and local sensor visibility are applied first to individual range images. The constraint tests based on global consistency of coordinates and visibility are then applied to all range images acquired from different viewpoints. We show the effectiveness of our method by constructing 3D models of five different optically challenging objects. To evaluate the performance of the constraint tests and to examine the effects of the parameters used in the constraint tests, we acquired the ground truth data by painting those objects to suppress the surface-related properties that cause difficulties in range sensing. Experimental results indicate that our method significantly improves upon the traditional methods for constructing reliable 3D models of optically challenging objects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a system for constructing 3D models of real-world objects with optically challenging surfaces. The system utilizes a new range imaging concept called multi-peak range imaging, which stores multiple candidates of range measurements for each point on the object surface. The multiple measurements include the erroneous range data caused by various surface properties that are not ideal for structured-light range sensing. False measurements generated by spurious reflections are eliminated by applying a series of constraint tests. The constraint tests based on local surface and local sensor visibility are applied first to individual range images. 
The constraint tests based on global consistency of coordinates and visibility are then applied to all range images acquired from different viewpoints. We show the effectiveness of our method by constructing 3D models of five different optically challenging objects. To evaluate the performance of the constraint tests and to examine the effects of the parameters used in the constraint tests, we acquired the ground truth data by painting those objects to suppress the surface-related properties that cause difficulties in range sensing. Experimental results indicate that our method significantly improves upon the traditional methods for constructing reliable 3D models of optically challenging objects.", "title": "3D Modeling of Optically Challenging Objects", "normalizedTitle": "3D Modeling of Optically Challenging Objects", "fno": "ttg2008020246", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Range Data", "Geometric Modeling", "Image Analysis", "Virtual Reality", "Feature Representation" ], "authors": [ { "givenName": "Johnny", "surname": "Park", "fullName": "Johnny Park", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Avinash", "surname": "Kak", "fullName": "Avinash Kak", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "246-262", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/1997/7822/0/78220684", "title": "Recognizing Objects by Matching Oriented Points", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1997/78220684/12OmNBtCCMZ", "parentPublication": { "id": "proceedings/cvpr/1997/7822/0", "title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mwp/2006/0203/0/04153827", "title": "Optically-Amplified Short-Length Analog Photonic Links", "doi": null, "abstractUrl": "/proceedings-article/mwp/2006/04153827/12OmNCdk2EN", "parentPublication": { "id": "proceedings/mwp/2006/0203/0", "title": "2006 International Topical Meeting on Microwave Photonics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1998/8295/0/82950917", "title": "Consensus Surfaces for Modeling 3D Objects from Multiple Range Images", "doi": null, "abstractUrl": "/proceedings-article/iccv/1998/82950917/12OmNCmGNZR", "parentPublication": { "id": "proceedings/iccv/1998/8295/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549425", "title": "Creating 3D Projection on tangible objects", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549425/12OmNqIzhfj", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sma/1997/7867/0/78670130", "title": "Geometrical cloning of 3D objects via simultaneous registration of multiple range images", "doi": null, "abstractUrl": "/proceedings-article/sma/1997/78670130/12OmNwpXRSq", "parentPublication": { "id": "proceedings/sma/1997/7867/0", "title": "Shape Modeling and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dpvt/2004/2223/0/22230707", "title": "Specularity Elimination in Range Sensing for Accurate 3D Modeling of Specular Objects", "doi": null, "abstractUrl": "/proceedings-article/3dpvt/2004/22230707/12OmNx3ZjdW", "parentPublication": { "id": "proceedings/3dpvt/2004/2223/0", "title": "3D Data Processing 
Visualization and Transmission, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2006/2606/0/26060347", "title": "Construction of 3D Composite Objects from Range Data", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2006/26060347/12OmNy3iFmH", "parentPublication": { "id": "proceedings/cgiv/2006/2606/0", "title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dpvt/2004/2223/0/22230422", "title": "Accurate 3D Acquisition of Freely Moving Objects", "doi": null, "abstractUrl": "/proceedings-article/3dpvt/2004/22230422/12OmNzBOhwo", "parentPublication": { "id": "proceedings/3dpvt/2004/2223/0", "title": "3D Data Processing Visualization and Transmission, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1995/08/i0820", "title": "Registering Multiview Range Data to Create 3D Computer Objects", "doi": null, "abstractUrl": "/journal/tp/1995/08/i0820/13rRUxlgxUh", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1996/04/mcg1996040058", "title": "Viewing Geometric Protein Structures From Inside a CAVE", "doi": null, "abstractUrl": "/magazine/cg/1996/04/mcg1996040058/13rRUy3gn3r", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020245", "articleId": "13rRUxYrbM7", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020263", "articleId": "13rRUIIVlkb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUIIVlkb", "doi": "10.1109/TVCG.2007.70409", "abstract": "Abstract—With current methods for volume haptics in scientific visualization, features in time-varying data can freely move straight through the haptic probe without generating any haptic feedback ? the algorithms are simply not designed to handle variation with time but consider only the instantaneous configuration when the haptic feedback is calculated. This article introduces haptic rendering of dynamic volumetric data to provide a means for haptic exploration of dynamic behaviour in volumetric data. We show how haptic feedback can be produced that is consistent with volumetric data moving within the virtual environment and with data that, in itself, evolves over time. Haptic interaction with time-varying data is demonstrated by allowing palpation of a CT sequence of a beating human heart.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—With current methods for volume haptics in scientific visualization, features in time-varying data can freely move straight through the haptic probe without generating any haptic feedback ? the algorithms are simply not designed to handle variation with time but consider only the instantaneous configuration when the haptic feedback is calculated. This article introduces haptic rendering of dynamic volumetric data to provide a means for haptic exploration of dynamic behaviour in volumetric data. We show how haptic feedback can be produced that is consistent with volumetric data moving within the virtual environment and with data that, in itself, evolves over time. 
Haptic interaction with time-varying data is demonstrated by allowing palpation of a CT sequence of a beating human heart.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—With current methods for volume haptics in scientific visualization, features in time-varying data can freely move straight through the haptic probe without generating any haptic feedback ? the algorithms are simply not designed to handle variation with time but consider only the instantaneous configuration when the haptic feedback is calculated. This article introduces haptic rendering of dynamic volumetric data to provide a means for haptic exploration of dynamic behaviour in volumetric data. We show how haptic feedback can be produced that is consistent with volumetric data moving within the virtual environment and with data that, in itself, evolves over time. Haptic interaction with time-varying data is demonstrated by allowing palpation of a CT sequence of a beating human heart.", "title": "Haptic Rendering of Dynamic Volumetric Data", "normalizedTitle": "Haptic Rendering of Dynamic Volumetric Data", "fno": "ttg2008020263", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Direct Volume Haptics", "Time Varying Data", "Changing Model Transform", "Scientific Visualization" ], "authors": [ { "givenName": "Karljohan Lundin", "surname": "Palmerius", "fullName": "Karljohan Lundin Palmerius", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Matthew", "surname": "Cooper", "fullName": "Matthew Cooper", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Anders", "surname": "Ynnerman", "fullName": "Anders Ynnerman", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "263-276", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, 
"notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2003/2030/0/20300055", "title": "High Dimensional Direct Rendering of Time-Varying Volumetric Data", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300055/12OmNqyUUDX", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isuc/2008/3433/0/3433a268", "title": "Toward Volume-Based Haptic Collaborative Virtual Environment with Realistic Sensation", "doi": null, "abstractUrl": "/proceedings-article/isuc/2008/3433a268/12OmNvFpEvn", "parentPublication": { "id": "proceedings/isuc/2008/3433/0", "title": "2008 Second International Symposium on Universal Communication", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2010/9343/0/05643585", "title": "Haptic simulation of breast cancer palpation: A case study of haptic augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643585/12OmNwtn3ui", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2002/1489/0/14890003", "title": "Comparing Two Haptic Interfaces for Multimodal Graph Rendering", "doi": null, "abstractUrl": "/proceedings-article/haptics/2002/14890003/12OmNxwWoTG", "parentPublication": { "id": "proceedings/haptics/2002/1489/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2011/1189/0/05999154", "title": "Ultrasound palpation by haptic elastography", "doi": null, "abstractUrl": 
"/proceedings-article/cbms/2011/05999154/12OmNy3Agyv", "parentPublication": { "id": "proceedings/cbms/2011/1189/0", "title": "2011 24th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vv/2002/7641/0/76410065", "title": "Shape Retaining Chain Linked Model for Real-Time Volume Haptic Rendering", "doi": null, "abstractUrl": "/proceedings-article/vv/2002/76410065/12OmNyGbIf6", "parentPublication": { "id": "proceedings/vv/2002/7641/0", "title": "Volume Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2010/6821/0/05444648", "title": "Haptic interaction with volumetric datasets using surface-based haptic libraries", "doi": null, "abstractUrl": "/proceedings-article/haptics/2010/05444648/12OmNynJMXF", "parentPublication": { "id": "proceedings/haptics/2010/6821/0", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2000/0478/0/04780031", "title": "Dynamic Deformable Models for Enhanced Haptic Rendering in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780031/12OmNzBwGmI", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2001/1227/0/12270254", "title": "Haptic Sculpting of Volumetric Implicit Functions", "doi": null, "abstractUrl": "/proceedings-article/pg/2001/12270254/12OmNzV70HP", "parentPublication": { "id": "proceedings/pg/2001/1227/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020246", "articleId": "13rRUx0xPZs", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020468", "articleId": "13rRUEgs2BO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUEgs2BO", "doi": "10.1109/TVCG.2007.70440", "abstract": "We introduce a novel technique to generate painterly art maps (PAMs) for 3D nonphotorealistic rendering. Our technique can automatically transfer brushstroke textures and color changes to 3D models from samples of a painted image. Therefore, the generation of stylized images or animation in the style of a given artwork can be achieved. This new approach works particularly well for a rich variety of brushstrokes ranging from simple 1D and 2D line-art strokes to very complicated ones with significant variations in stroke characteristics. During the rendering or animation process, the coherence of brushstroke textures and color changes over 3D surfaces can be well maintained. With PAM, we can also easily generate the illusion of flow animation over a 3D surface to convey the shape of a model.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce a novel technique to generate painterly art maps (PAMs) for 3D nonphotorealistic rendering. Our technique can automatically transfer brushstroke textures and color changes to 3D models from samples of a painted image. Therefore, the generation of stylized images or animation in the style of a given artwork can be achieved. This new approach works particularly well for a rich variety of brushstrokes ranging from simple 1D and 2D line-art strokes to very complicated ones with significant variations in stroke characteristics. During the rendering or animation process, the coherence of brushstroke textures and color changes over 3D surfaces can be well maintained. 
With PAM, we can also easily generate the illusion of flow animation over a 3D surface to convey the shape of a model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce a novel technique to generate painterly art maps (PAMs) for 3D nonphotorealistic rendering. Our technique can automatically transfer brushstroke textures and color changes to 3D models from samples of a painted image. Therefore, the generation of stylized images or animation in the style of a given artwork can be achieved. This new approach works particularly well for a rich variety of brushstrokes ranging from simple 1D and 2D line-art strokes to very complicated ones with significant variations in stroke characteristics. During the rendering or animation process, the coherence of brushstroke textures and color changes over 3D surfaces can be well maintained. With PAM, we can also easily generate the illusion of flow animation over a 3D surface to convey the shape of a model.", "title": "Stylized Rendering Using Samples of a Painted Image", "normalizedTitle": "Stylized Rendering Using Samples of a Painted Image", "fno": "ttg2008020468", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Animation", "Image Colour Analysis", "Image Sampling", "Image Texture", "Rendering Computer Graphics", "Painted Image", "3 D Nonphotorealistic Rendering", "Image Colour Analysis", "Computer Animation", "Brushstroke Texture", "Image Sampling", "Rendering Computer Graphics", "Painting", "Art", "Paints", "Image Generation", "Animation", "Coherence", "Real Time Systems", "Color", "Surface Texture", "Computer Graphics", "Color", "Shading", "Shadowing", "And Texture", "Display Algorithms", "Computer Graphics", "Color", "Shading", "Shadowing", "And Texture", "Display Algorithms" ], "authors": [ { "givenName": "Chung-Ren", "surname": "Yan", "fullName": "Chung-Ren Yan", "affiliation": "Computer Graphics Group/Visual System Laboratory, Department of Computer Science and Engineering, 
National Cheng-Kung University.", "__typename": "ArticleAuthorType" }, { "givenName": "Ming-Te", "surname": "Chi", "fullName": "Ming-Te Chi", "affiliation": "Computer Graphics Group/Visual System Laboratory, Department of Computer Science and Engineering, National Cheng-Kung University.", "__typename": "ArticleAuthorType" }, { "givenName": "Tong-Yee", "surname": "Lee", "fullName": "Tong-Yee Lee", "affiliation": "Computer Graphics Group/Visual System Laboratory, Department of Computer Science and Engineering, National Cheng-Kung University.", "__typename": "ArticleAuthorType" }, { "givenName": "Wen-Chieh", "surname": "Lin", "fullName": "Wen-Chieh Lin", "affiliation": "Department of Computer Science, National Chiao-Tung University, Hsinchu, Taiwan.", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "468-480", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vv/2002/7641/0/76410115", "title": "Accelerating Volume Rendering with Texture Hulls", "doi": null, "abstractUrl": "/proceedings-article/vv/2002/76410115/12OmNB6D70H", "parentPublication": { "id": "proceedings/vv/2002/7641/0", "title": "Volume Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1993/3940/0/00398880", "title": "Spray rendering: Visualization using smart particles", "doi": null, "abstractUrl": "/proceedings-article/visual/1993/00398880/12OmNrJ11HF", "parentPublication": { "id": "proceedings/visual/1993/3940/0", "title": "Proceedings Visualization '93", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2004/2171/0/21710222", "title": "Extracting 3D Stylized Accentuation 
Effects from a Painted Image", "doi": null, "abstractUrl": "/proceedings-article/cgi/2004/21710222/12OmNwdtwke", "parentPublication": { "id": "proceedings/cgi/2004/2171/0", "title": "Proceedings. Computer Graphics International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1988/9999/1/00754002", "title": "Rendering Of Texture On 3D Surfaces", "doi": null, "abstractUrl": "/proceedings-article/acssc/1988/00754002/12OmNxxNbPS", "parentPublication": { "id": "proceedings/acssc/1988/9999/1", "title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2001/1227/0/12270322", "title": "Non-Photorealistic Rendering Using Watercolor Inspired Textures and Illumination", "doi": null, "abstractUrl": "/proceedings-article/pg/2001/12270322/12OmNy4r41q", "parentPublication": { "id": "proceedings/pg/2001/1227/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciap/1999/0040/0/00401055", "title": "Texture Extraction from Photographs and Rendering with Dynamic Texture Mapping", "doi": null, "abstractUrl": "/proceedings-article/iciap/1999/00401055/12OmNz61drx", "parentPublication": { "id": "proceedings/iciap/1999/0040/0", "title": "Image Analysis and Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660028", "title": "View-Dependent Rendering of Multiresolution Texture-Atlases", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660028/12OmNzXWZDD", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ieee-vis/2000/6478/0/64780038", "title": "Pen-and-Ink Rendering in Volume Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780038/12OmNzcxZjW", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2003/04/mcg2003040054", "title": "Stylized Highlights for Cartoon Rendering and Animation", "doi": null, "abstractUrl": "/magazine/cg/2003/04/mcg2003040054/13rRUwInvnh", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1997/02/mcg1997020018", "title": "Multiresolution Textures from Image Sequences", "doi": null, "abstractUrl": "/magazine/cg/1997/02/mcg1997020018/13rRUxN5eyd", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020263", "articleId": "13rRUIIVlkb", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020277", "articleId": "13rRUxly8SQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxly8SQ", "doi": "10.1109/TVCG.2007.70408", "abstract": "Abstract—We present techniques for warping and blending (or subtracting) geometric textures onto surfaces represented by high resolution level sets. The geometric texture itself can be represented either explicitly as a polygonal mesh or implicitly as a level set. Unlike previous approaches, we can produce topologically connected surfaces with smooth blending and low distortion. Specifically, we offer two different solutions to the problem of adding fine-scale geometric detail to surfaces. Both solutions assume a level set representation of the base surface which is easily achieved by means of a mesh-to-level-set scan conversion. To facilitate our mapping, we parameterize the embedding space of the base level set surface using fast particle advection. We can then warp explicit texture meshes onto this surface at nearly interactive speeds or blend level set representations of the texture to produce high-quality surfaces with smooth transitions.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—We present techniques for warping and blending (or subtracting) geometric textures onto surfaces represented by high resolution level sets. The geometric texture itself can be represented either explicitly as a polygonal mesh or implicitly as a level set. Unlike previous approaches, we can produce topologically connected surfaces with smooth blending and low distortion. Specifically, we offer two different solutions to the problem of adding fine-scale geometric detail to surfaces. 
Both solutions assume a level set representation of the base surface which is easily achieved by means of a mesh-to-level-set scan conversion. To facilitate our mapping, we parameterize the embedding space of the base level set surface using fast particle advection. We can then warp explicit texture meshes onto this surface at nearly interactive speeds or blend level set representations of the texture to produce high-quality surfaces with smooth transitions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—We present techniques for warping and blending (or subtracting) geometric textures onto surfaces represented by high resolution level sets. The geometric texture itself can be represented either explicitly as a polygonal mesh or implicitly as a level set. Unlike previous approaches, we can produce topologically connected surfaces with smooth blending and low distortion. Specifically, we offer two different solutions to the problem of adding fine-scale geometric detail to surfaces. Both solutions assume a level set representation of the base surface which is easily achieved by means of a mesh-to-level-set scan conversion. To facilitate our mapping, we parameterize the embedding space of the base level set surface using fast particle advection. 
We can then warp explicit texture meshes onto this surface at nearly interactive speeds or blend level set representations of the texture to produce high-quality surfaces with smooth transitions.", "title": "Geometric Texturing Using Level Sets", "normalizedTitle": "Geometric Texturing Using Level Sets", "fno": "ttg2008020277", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Geometric Texture Mapping", "Parameterization", "Implicit Surfaces", "Volume Texturing", "Geometric Modeling" ], "authors": [ { "givenName": "Anders", "surname": "Brodersen", "fullName": "Anders Brodersen", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Ken", "surname": "Museth", "fullName": "Ken Museth", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Serban", "surname": "Porumbescu", "fullName": "Serban Porumbescu", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Brian", "surname": "Budge", "fullName": "Brian Budge", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "277-288", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pg/2000/0868/0/08680213", "title": "Dynamic PDE Surfaces with Flexible and General Geometric Constraints", "doi": null, "abstractUrl": "/proceedings-article/pg/2000/08680213/12OmNBkxspv", "parentPublication": { "id": "proceedings/pg/2000/0868/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/1999/0185/0/01850025", "title": "A Field Interpolated Texture Mapping Algorithm for Skeletal Implicit Surfaces", "doi": null, "abstractUrl": 
"/proceedings-article/cgi/1999/01850025/12OmNBuL1dv", "parentPublication": { "id": "proceedings/cgi/1999/0185/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smi/2003/1845/0/18450013", "title": "Generalized Functional and Decorative Filleting and Blending Operations", "doi": null, "abstractUrl": "/proceedings-article/smi/2003/18450013/12OmNqzcvA0", "parentPublication": { "id": "proceedings/smi/2003/1845/0", "title": "Shape Modeling and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmp/2000/0562/0/05620321", "title": "Interval Methods in Geometric Modeling", "doi": null, "abstractUrl": "/proceedings-article/gmp/2000/05620321/12OmNwMob8s", "parentPublication": { "id": "proceedings/gmp/2000/0562/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2006/2606/0/26060534", "title": "Implicit Blends with an Individual Blending Range Control on Every Primitive?s Subsequent Blend", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2006/26060534/12OmNxGj9Kb", "parentPublication": { "id": "proceedings/cgiv/2006/2606/0", "title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/05/v0939", "title": "Texturing Fluids", "doi": null, "abstractUrl": "/journal/tg/2007/05/v0939/13rRUwbs20O", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2005/02/mcg2005020036", "title": "Bounded Blending for Function-Based Shape Modeling", "doi": null, "abstractUrl": 
"/magazine/cg/2005/02/mcg2005020036/13rRUwjoNCb", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/03/v0549", "title": "Free-Form Geometric Modeling by Integrating Parametric and Implicit PDEs", "doi": null, "abstractUrl": "/journal/tg/2007/03/v0549/13rRUwkfAZc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2002/04/v0346", "title": "Robust Creation of Implicit Surfaces from Polygonal Meshes", "doi": null, "abstractUrl": "/journal/tg/2002/04/v0346/13rRUxD9h4Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2000/05/i0445", "title": "Snake Pedals: Compact and Versatile Geometric Models with Physics-Based Control", "doi": null, "abstractUrl": "/journal/tp/2000/05/i0445/13rRUy0HYKQ", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020468", "articleId": "13rRUEgs2BO", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020289", "articleId": "13rRUxYIMUR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxYIMUR", "doi": "10.1109/TVCG.2007.70414", "abstract": "In this paper prefiltered reconstruction techniquesare evaluated for volume-rendering applications. All the analyzedmethods perform a discrete prefiltering as a preprocessing of theinput samples in order to improve the quality of the continuousreconstruction afterwards. Various prefiltering schemes havebeen proposed to fulfill either spatial-domain or frequencydomaincriteria. According to our best knowledge, however, theirthorough comparative study has not been published yet. Thereforewe derive the frequency responses of the different prefilteredreconstruction techniques to analyze their global behavior suchas aliasing or smoothing. Furthermore, we introduce a novelmathematical basis to compare also their spatial-domain behaviorin terms of the asymptotic local error effect. For the sake of faircomparison, we use the same linear and cubic B-splines as basisfunctions but combined with different discrete prefilters. Ourgoal with this analysis is to help the potential users to select theoptimal prefiltering scheme for their specific applications.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper prefiltered reconstruction techniquesare evaluated for volume-rendering applications. All the analyzedmethods perform a discrete prefiltering as a preprocessing of theinput samples in order to improve the quality of the continuousreconstruction afterwards. Various prefiltering schemes havebeen proposed to fulfill either spatial-domain or frequencydomaincriteria. According to our best knowledge, however, theirthorough comparative study has not been published yet. 
Thereforewe derive the frequency responses of the different prefilteredreconstruction techniques to analyze their global behavior suchas aliasing or smoothing. Furthermore, we introduce a novelmathematical basis to compare also their spatial-domain behaviorin terms of the asymptotic local error effect. For the sake of faircomparison, we use the same linear and cubic B-splines as basisfunctions but combined with different discrete prefilters. Ourgoal with this analysis is to help the potential users to select theoptimal prefiltering scheme for their specific applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper prefiltered reconstruction techniquesare evaluated for volume-rendering applications. All the analyzedmethods perform a discrete prefiltering as a preprocessing of theinput samples in order to improve the quality of the continuousreconstruction afterwards. Various prefiltering schemes havebeen proposed to fulfill either spatial-domain or frequencydomaincriteria. According to our best knowledge, however, theirthorough comparative study has not been published yet. Thereforewe derive the frequency responses of the different prefilteredreconstruction techniques to analyze their global behavior suchas aliasing or smoothing. Furthermore, we introduce a novelmathematical basis to compare also their spatial-domain behaviorin terms of the asymptotic local error effect. For the sake of faircomparison, we use the same linear and cubic B-splines as basisfunctions but combined with different discrete prefilters. 
Ourgoal with this analysis is to help the potential users to select theoptimal prefiltering scheme for their specific applications.", "title": "An Evaluation of Prefiltered Reconstruction Schemes for Volume Rendering", "normalizedTitle": "An Evaluation of Prefiltered Reconstruction Schemes for Volume Rendering", "fno": "ttg2008020289", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Filtering", "Sampling", "Volume Visualization" ], "authors": [ { "givenName": "Balázs", "surname": "Csébfalvi", "fullName": "Balázs Csébfalvi", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "289-301", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2005/2766/0/27660040", "title": "Prefiltered Gaussian Reconstruction for High-Quality Rendering of Volumetric Data sampled on a Body-Centered Cubic Grid", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660040/12OmNAfy7Ky", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1994/6627/0/00346331", "title": "An evaluation of reconstruction filters for volume rendering", "doi": null, "abstractUrl": "/proceedings-article/visual/1994/00346331/12OmNB7cjk3", "parentPublication": { "id": "proceedings/visual/1994/6627/0", "title": "Proceedings Visualization '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300059", "title": "Monte Carlo Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300059/12OmNCdBDFe", "parentPublication": { "id": 
"proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532810", "title": "Prefiltered Gaussian reconstruction for high-quality rendering of volumetric data sampled on a body-centered cubic grid", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532810/12OmNwFzO0f", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2011/4602/0/4602a158", "title": "An Adaptive Sampling Based Parallel Volume Rendering Algorithm", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2011/4602a158/12OmNxE2mHp", "parentPublication": { "id": "proceedings/icvrv/2011/4602/0", "title": "2011 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660038", "title": "Scale-Invariant Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660038/12OmNxb5hu0", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1992/3160/0/00269274", "title": "On optimal prefiltering for wavelet coefficient computation", "doi": null, "abstractUrl": "/proceedings-article/acssc/1992/00269274/12OmNy7yEen", "parentPublication": { "id": "proceedings/acssc/1992/3160/0", "title": "Conference Record of the Twenty-Sixth Asilomar Conference on Signals, Systems & Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/03/ttg2010030499", "title": "An Evaluation of Prefiltered B-Spline Reconstruction for Quasi-Interpolation on 
the Body-Centered Cubic Lattice", "doi": null, "abstractUrl": "/journal/tg/2010/03/ttg2010030499/13rRUEgarBn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2001/03/v0242", "title": "Two-Level Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2001/03/v0242/13rRUxC0SOO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122335", "title": "Fuzzy Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122335/13rRUyeTVi0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020277", "articleId": "13rRUxly8SQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020302", "articleId": "13rRUwjGoLA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwjGoLA", "doi": "10.1109/TVCG.2007.70428", "abstract": "We present a novel approach for latency-tolerant delivery of visualization and rendering results where client-side frame rate display performance is independent of source dataset size, image size, visualization technique or rendering complexity. Our approach delivers pre-rendered, multiresolution images to a remote user as they navigate through different viewpoints, visualization or rendering parameters. We employ demand-driven tiled, multiresolution image streaming and prefetching to efficiently utilize available bandwidth while providing the maximum resolution user can perceive from a given viewpoint. Since image data is the only input to our system, our approach is generally applicable to all visualization and graphics rendering applications capable of generating image files in an ordered fashion. In our implementation, a normal web server provides on-demand images to a remote custom client application, which uses client-pull to obtain and cache only those images required to fulfill the interaction needs. 
The main contributions of this work are: (1) an architecture for latency-tolerant, remote delivery of precomputed imagery suitable for use with any visualization or rendering application capable of producing images in an ordered fashion; (2) a performance study showing the impact of diverse network environments and different tunable system parameters on end-to-end system performance in terms of deliverable frames per second.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel approach for latency-tolerant delivery of visualization and rendering results where client-side frame rate display performance is independent of source dataset size, image size, visualization technique or rendering complexity. Our approach delivers pre-rendered, multiresolution images to a remote user as they navigate through different viewpoints, visualization or rendering parameters. We employ demand-driven tiled, multiresolution image streaming and prefetching to efficiently utilize available bandwidth while providing the maximum resolution user can perceive from a given viewpoint. Since image data is the only input to our system, our approach is generally applicable to all visualization and graphics rendering applications capable of generating image files in an ordered fashion. In our implementation, a normal web server provides on-demand images to a remote custom client application, which uses client-pull to obtain and cache only those images required to fulfill the interaction needs. 
The main contributions of this work are: (1) an architecture for latency-tolerant, remote delivery of precomputed imagery suitable for use with any visualization or rendering application capable of producing images in an ordered fashion; (2) a performance study showing the impact of diverse network environments and different tunable system parameters on end-to-end system performance in terms of deliverable frames per second.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel approach for latency-tolerant delivery of visualization and rendering results where client-side frame rate display performance is independent of source dataset size, image size, visualization technique or rendering complexity. Our approach delivers pre-rendered, multiresolution images to a remote user as they navigate through different viewpoints, visualization or rendering parameters. We employ demand-driven tiled, multiresolution image streaming and prefetching to efficiently utilize available bandwidth while providing the maximum resolution user can perceive from a given viewpoint. Since image data is the only input to our system, our approach is generally applicable to all visualization and graphics rendering applications capable of generating image files in an ordered fashion. In our implementation, a normal web server provides on-demand images to a remote custom client application, which uses client-pull to obtain and cache only those images required to fulfill the interaction needs. 
The main contributions of this work are: (1) an architecture for latency-tolerant, remote delivery of precomputed imagery suitable for use with any visualization or rendering application capable of producing images in an ordered fashion; (2) a performance study showing the impact of diverse network environments and different tunable system parameters on end-to-end system performance in terms of deliverable frames per second.", "title": "Interactive, Internet Delivery of Visualization via Structured Prerendered Multiresolution Imagery", "normalizedTitle": "Interactive, Internet Delivery of Visualization via Structured Prerendered Multiresolution Imagery", "fno": "ttg2008020302", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization Systems And Software", "Evolving Internet Applications", "Distributed Network Graphics" ], "authors": [ { "givenName": "Jerry", "surname": "Chen", "fullName": "Jerry Chen", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Ilmi", "surname": "Yoon", "fullName": "Ilmi Yoon", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Wes", "surname": "Bethel", "fullName": "Wes Bethel", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "302-312", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ipdps/2001/0990/3/099030181", "title": "Comparison of Remote Visualization Strategies for Interactive Exploration of Large Data Sets", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2001/099030181/12OmNCesr6n", "parentPublication": { "id": "proceedings/ipdps/2001/0990/3", "title": "Parallel and Distributed Processing Symposium, International", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/visual/1999/5897/0/00809908", "title": "Multiresolution techniques for interactive texture-based volume visualization", "doi": null, "abstractUrl": "/proceedings-article/visual/1999/00809908/12OmNwlHSSP", "parentPublication": { "id": "proceedings/visual/1999/5897/0", "title": "Proceedings Visualization '99", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2011/4602/0/4602a199", "title": "View-Dependent Interactive Visualization Methods for Multiresolution Datasets in JaVis", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2011/4602a199/12OmNxymocM", "parentPublication": { "id": "proceedings/icvrv/2011/4602/0", "title": "2011 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/1997/8076/0/80760148", "title": "Binary space partitioning trees: a multiresolution approach", "doi": null, "abstractUrl": "/proceedings-article/iv/1997/80760148/12OmNyjccyk", "parentPublication": { "id": "proceedings/iv/1997/8076/0", "title": "Proceedings. 1997 IEEE Conference on Information Visualization (Cat. 
No.97TB100165)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2014/6854/0/6854a110", "title": "Research of Collaborative Interactive Visualization for Medical Imaging", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2014/6854a110/12OmNzzP5Ql", "parentPublication": { "id": "proceedings/icvrv/2014/6854/0", "title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2004/03/v0353", "title": "Efficient Implementation of Real-Time View-Dependent Multiresolution Meshing", "doi": null, "abstractUrl": "/journal/tg/2004/03/v0353/13rRUwfI0PX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030576", "title": "Interactive View-Dependent Rendering over Networks", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030576/13rRUxC0SOT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/05/ttg2010050729", "title": "Interactive Indirect Illumination Using Adaptive Multiresolution Splatting", "doi": null, "abstractUrl": "/journal/tg/2010/05/ttg2010050729/13rRUxD9h53", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/00809908", "title": "Multiresolution techniques for interactive texture-based volume visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/00809908/1h0KNaivmHm", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": 
"Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dagstuhl/1997/0503/0/01423129", "title": "Multiresolution and Adaptive Rendering Techniques for Structured, Curvilinear Data", "doi": null, "abstractUrl": "/proceedings-article/dagstuhl/1997/01423129/1h0N2fIfMMU", "parentPublication": { "id": "proceedings/dagstuhl/1997/0503/0", "title": "Dagstuhl '97 - Scientific Visualization Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020289", "articleId": "13rRUxYIMUR", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020329", "articleId": "13rRUwbs2gl", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwbs2gl", "doi": "10.1109/TVCG.2007.70431", "abstract": "This paper provides a formal connexion between springs and continuum mechanics in the context of one-dimensional and two-dimensional elasticity. In a first stage, the equivalence between tensile springs and the finite element discretization of stretching energy on planar curves is established. Furthermore, when considering a quadratic strain function of stretch, we introduce a new type of springs called tensile biquadratic springs. In a second stage, we extend this equivalence to non-linear membranes (St Venant-Kirchhoff materials) on triangular meshes leading to triangular biquadratic and quadratic springs. Those tensile and angular springs produce isotropic deformations parameterized by Young modulus and Poisson ratios on unstructured meshes in an efficient and simple way. For a specific choice of the Poisson ratio, 0.3, we show that regular spring-mass models may be used realistically to simulate a membrane behavior. Finally, the different spring formulations are tested in pure traction and cloth simulation experiments.", "abstracts": [ { "abstractType": "Regular", "content": "This paper provides a formal connexion between springs and continuum mechanics in the context of one-dimensional and two-dimensional elasticity. In a first stage, the equivalence between tensile springs and the finite element discretization of stretching energy on planar curves is established. Furthermore, when considering a quadratic strain function of stretch, we introduce a new type of springs called tensile biquadratic springs. 
In a second stage, we extend this equivalence to non-linear membranes (St Venant-Kirchhoff materials) on triangular meshes leading to triangular biquadratic and quadratic springs. Those tensile and angular springs produce isotropic deformations parameterized by Young modulus and Poisson ratios on unstructured meshes in an efficient and simple way. For a specific choice of the Poisson ratio, 0.3, we show that regular spring-mass models may be used realistically to simulate a membrane behavior. Finally, the different spring formulations are tested in pure traction and cloth simulation experiments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper provides a formal connexion between springs and continuum mechanics in the context of one-dimensional and two-dimensional elasticity. In a first stage, the equivalence between tensile springs and the finite element discretization of stretching energy on planar curves is established. Furthermore, when considering a quadratic strain function of stretch, we introduce a new type of springs called tensile biquadratic springs. In a second stage, we extend this equivalence to non-linear membranes (St Venant-Kirchhoff materials) on triangular meshes leading to triangular biquadratic and quadratic springs. Those tensile and angular springs produce isotropic deformations parameterized by Young modulus and Poisson ratios on unstructured meshes in an efficient and simple way. For a specific choice of the Poisson ratio, 0.3, we show that regular spring-mass models may be used realistically to simulate a membrane behavior. 
Finally, the different spring formulations are tested in pure traction and cloth simulation experiments.", "title": "Triangular Springs for Modeling Nonlinear Membranes", "normalizedTitle": "Triangular Springs for Modeling Nonlinear Membranes", "fno": "ttg2008020329", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Animation", "Physically Based Modeling", "Animation" ], "authors": [ { "givenName": "Hervé", "surname": "Delingette", "fullName": "Hervé Delingette", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "329-341", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/svr/2013/5001/0/06655784", "title": "Cloth Simulation Using Triangular Mesh: A Study of Mesh Adaptivity", "doi": null, "abstractUrl": "/proceedings-article/svr/2013/06655784/12OmNA1VnsC", "parentPublication": { "id": "proceedings/svr/2013/5001/0", "title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1995/7062/0/70620138", "title": "Animating deformable models: different approaches", "doi": null, "abstractUrl": "/proceedings-article/ca/1995/70620138/12OmNxA3Z7L", "parentPublication": { "id": "proceedings/ca/1995/7062/0", "title": "Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/2001/7237/0/00982374", "title": "A physically-based model with adaptive refinement for facial animation", "doi": null, "abstractUrl": "/proceedings-article/ca/2001/00982374/12OmNxRWI7R", "parentPublication": { "id": "proceedings/ca/2001/7237/0", "title": "Proceedings Computer Animation 2001. 
Fourteenth Conference on Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2004/2171/0/21710528", "title": "Stabilizing Explicit Methods in Spring-Mass Simulation", "doi": null, "abstractUrl": "/proceedings-article/cgi/2004/21710528/12OmNzBwGuG", "parentPublication": { "id": "proceedings/cgi/2004/2171/0", "title": "Proceedings. Computer Graphics International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040797", "title": "Two-Way Coupled SPH and Particle Level Set Fluid Simulation", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040797/13rRUxE04tu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2007/06/mcg2007060087", "title": "Spring-Bead Animation of Viscoelastic Materials", "doi": null, "abstractUrl": "/magazine/cg/2007/06/mcg2007060087/13rRUxjQy6F", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/03/ttg2009030493", "title": "Direct Forcing for Lagrangian Rigid-Fluid Coupling", "doi": null, "abstractUrl": "/journal/tg/2009/03/ttg2009030493/13rRUxlgy3z", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/01/v0091", "title": "Modal Warping: Real-Time Simulation of Large Rotational Deformation and Manipulation", "doi": null, "abstractUrl": "/journal/tg/2005/01/v0091/13rRUy3xY80", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "trans/tg/2012/02/ttg2012020228", "title": "Cubical Mass-Spring Model Design Based on a Tensile Deformation Test and Nonlinear Material Model", "doi": null, "abstractUrl": "/journal/tg/2012/02/ttg2012020228/13rRUygT7y7", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2009/04/mcg2009040019", "title": "Brain Springs: Fast Physics for Large Crowds in WALL•E", "doi": null, "abstractUrl": "/magazine/cg/2009/04/mcg2009040019/13rRUzpzeDw", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020302", "articleId": "13rRUwjGoLA", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020342", "articleId": "13rRUxcsYLG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxcsYLG", "doi": "10.1109/TVCG.2007.70434", "abstract": "We present a practical approach to generate stochastic anisotropic samples with Poisson-disk characteristic over a two-dimensional domain. In contrast to isotropic samples, we understand anisotropic samples as non-overlapping ellipses whose size and density match a given anisotropic metric. Anisotropic noise samples are useful for many visualization and graphics applications. The spot samples can be used as input for texture generation, e.g., line integral convolution (LIC), but can also be used directly for visualization. The definition of the spot samples using a metric tensor makes them especially suitable for the visualization of tensor fields that can be translated into a metric. Our work combines ideas from sampling theory and mesh generation. To generate these samples with the desired properties we construct a first set of non-overlapping ellipses whose distribution closely matches the underlying metric. This set of samples is used as input for a generalized anisotropic Lloyd relaxation to distribute noise samples more evenly. Instead of computing the Voronoi tessellation explicitly, we introduce a discrete approach which combines the Voronoi cell and centroid computation in one step. Our method supports automatic packing of the elliptical samples, resulting in textures similar to those generated by anisotropic reaction-diffusion methods. 
We use Fourier analysis tools for quality measurement of uniformly distributed samples.", "abstracts": [ { "abstractType": "Regular", "content": "We present a practical approach to generate stochastic anisotropic samples with Poisson-disk characteristic over a two-dimensional domain. In contrast to isotropic samples, we understand anisotropic samples as non-overlapping ellipses whose size and density match a given anisotropic metric. Anisotropic noise samples are useful for many visualization and graphics applications. The spot samples can be used as input for texture generation, e.g., line integral convolution (LIC), but can also be used directly for visualization. The definition of the spot samples using a metric tensor makes them especially suitable for the visualization of tensor fields that can be translated into a metric. Our work combines ideas from sampling theory and mesh generation. To generate these samples with the desired properties we construct a first set of non-overlapping ellipses whose distribution closely matches the underlying metric. This set of samples is used as input for a generalized anisotropic Lloyd relaxation to distribute noise samples more evenly. Instead of computing the Voronoi tessellation explicitly, we introduce a discrete approach which combines the Voronoi cell and centroid computation in one step. Our method supports automatic packing of the elliptical samples, resulting in textures similar to those generated by anisotropic reaction-diffusion methods. We use Fourier analysis tools for quality measurement of uniformly distributed samples.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a practical approach to generate stochastic anisotropic samples with Poisson-disk characteristic over a two-dimensional domain. In contrast to isotropic samples, we understand anisotropic samples as non-overlapping ellipses whose size and density match a given anisotropic metric. 
Anisotropic noise samples are useful for many visualization and graphics applications. The spot samples can be used as input for texture generation, e.g., line integral convolution (LIC), but can also be used directly for visualization. The definition of the spot samples using a metric tensor makes them especially suitable for the visualization of tensor fields that can be translated into a metric. Our work combines ideas from sampling theory and mesh generation. To generate these samples with the desired properties we construct a first set of non-overlapping ellipses whose distribution closely matches the underlying metric. This set of samples is used as input for a generalized anisotropic Lloyd relaxation to distribute noise samples more evenly. Instead of computing the Voronoi tessellation explicitly, we introduce a discrete approach which combines the Voronoi cell and centroid computation in one step. Our method supports automatic packing of the elliptical samples, resulting in textures similar to those generated by anisotropic reaction-diffusion methods. 
We use Fourier analysis tools for quality measurement of uniformly distributed samples.", "title": "Anisotropic Noise Samples", "normalizedTitle": "Anisotropic Noise Samples", "fno": "ttg2008020342", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Flow Visualization", "Computer Graphics", "Picture Image Generation", "Sampling", "Relaxation" ], "authors": [ { "givenName": "Louis", "surname": "Feng", "fullName": "Louis Feng", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Ingrid", "surname": "Hotz", "fullName": "Ingrid Hotz", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Bernd", "surname": "Hamann", "fullName": "Bernd Hamann", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Kenneth", "surname": "Joy", "fullName": "Kenneth Joy", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "342-354", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cbms/2008/3165/0/3165a029", "title": "Noise Filtering Using Edge-Driven Adaptive Anisotropic Diffusion", "doi": null, "abstractUrl": "/proceedings-article/cbms/2008/3165a029/12OmNB1wkMx", "parentPublication": { "id": "proceedings/cbms/2008/3165/0", "title": "2008 21st IEEE International Symposium on Computer-Based Medical Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvd/2011/4483/0/4483a091", "title": "Rescue Boat Voronoi Diagrams for Inhomogeneous, Anisotropic, and Time-Varying Distances", "doi": null, "abstractUrl": "/proceedings-article/isvd/2011/4483a091/12OmNwcUk0B", "parentPublication": { "id": "proceedings/isvd/2011/4483/0", "title": "2011 Eighth International Symposium on Voronoi 
Diagrams in Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvd/2013/5037/0/5037a047", "title": "Voronoi Diagrams from (Possibly Discontinuous) Embeddings", "doi": null, "abstractUrl": "/proceedings-article/isvd/2013/5037a047/12OmNyQGSnr", "parentPublication": { "id": "proceedings/isvd/2013/5037/0", "title": "2013 10th International Symposium on Voronoi Diagrams in Science and Engineering (ISVD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109c664", "title": "Multichannel Image Regularisation Using Anisotropic Geodesic Filtering", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109c664/12OmNzFMFl4", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2007/1016/0/04284694", "title": "Anisotropic Manifold Ranking for Video Annotation", "doi": null, "abstractUrl": "/proceedings-article/icme/2007/04284694/12OmNzhELfS", "parentPublication": { "id": "proceedings/icme/2007/1016/0", "title": "2007 International Conference on Multimedia & Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459425", "title": "Image compression with anisotropic triangulations", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459425/12OmNzlly5j", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/07/ttg2013071143", "title": "Generalized Anisotropic Stratified Surface Sampling", "doi": null, "abstractUrl": "/journal/tg/2013/07/ttg2013071143/13rRUILc8fc", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/11/ttg2013111782", "title": "Anisotropic Sampling of Planar and Two-Manifold Domains for Texture Generation and Glyph Distribution", "doi": null, "abstractUrl": "/journal/tg/2013/11/ttg2013111782/13rRUNvgziE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1990/02/i0211", "title": "Texture Segmentation Using Voronoi Polygons", "doi": null, "abstractUrl": "/journal/tp/1990/02/i0211/13rRUxASuNo", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/02/ttg2008020369", "title": "Generic Remeshing of 3D Triangular Meshes with Metric-Dependent Discrete Voronoi Diagrams", "doi": null, "abstractUrl": "/journal/tg/2008/02/ttg2008020369/13rRUxBJhvn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020329", "articleId": "13rRUwbs2gl", "__typename": "AdjacentArticleType" }, "next": { "fno": "04435116", "articleId": "13rRUxcKzVh", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxcKzVh", "doi": "10.1109/TVCG.2008.15", "abstract": "Provides instructions and guidelines to prospective authors who wish to submit manuscripts.", "abstracts": [ { "abstractType": "Regular", "content": "Provides instructions and guidelines to prospective authors who wish to submit manuscripts.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Provides instructions and guidelines to prospective authors who wish to submit manuscripts.", "title": "TVCG Information for authors", "normalizedTitle": "TVCG Information for authors", "fno": "04435116", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "c3", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "ttg2008020342", "articleId": "13rRUxcsYLG", "__typename": "AdjacentArticleType" }, "next": { "fno": "04435117", "articleId": "13rRUwh80H6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwh80H6", "doi": "10.1109/TVCG.2008.16", "abstract": "Provides a listing of current staff, committee members and society officers.", "abstracts": [ { "abstractType": "Regular", "content": "Provides a listing of current staff, committee members and society officers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Provides a listing of current staff, committee members and society officers.", "title": "[Back cover]", "normalizedTitle": "[Back cover]", "fno": "04435117", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "c4", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "04435116", "articleId": "13rRUxcKzVh", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020355", "articleId": "13rRUwd9CFZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwd9CFZ", "doi": "10.1109/TVCG.2008.23", "abstract": "We present a practical algorithm for computing robust, multiscale curve and surface skeletons of 3D objects. Based on a model which follows an advection principle, we assign to each point on the skeleton a part of the object surface, called the collapse. The size of the collapse is used as a uniform importance measure for the curve and surface skeleton, so that both can be simplified by imposing a single threshold on this intuitive measure. The simplified skeletons are connected by default, without special precautions, due to the monotonicity of the importance measure. The skeletons possess additional desirable properties: They are centered, robust to noise, hierarchical, and provide a natural skeleton-to-boundary mapping. We present a voxel-based algorithm that is straightforward to implement and simple to use. We illustrate our method on several realistic 3D objects.", "abstracts": [ { "abstractType": "Regular", "content": "We present a practical algorithm for computing robust, multiscale curve and surface skeletons of 3D objects. Based on a model which follows an advection principle, we assign to each point on the skeleton a part of the object surface, called the collapse. The size of the collapse is used as a uniform importance measure for the curve and surface skeleton, so that both can be simplified by imposing a single threshold on this intuitive measure. The simplified skeletons are connected by default, without special precautions, due to the monotonicity of the importance measure. 
The skeletons possess additional desirable properties: They are centered, robust to noise, hierarchical, and provide a natural skeleton-to-boundary mapping. We present a voxel-based algorithm that is straightforward to implement and simple to use. We illustrate our method on several realistic 3D objects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a practical algorithm for computing robust, multiscale curve and surface skeletons of 3D objects. Based on a model which follows an advection principle, we assign to each point on the skeleton a part of the object surface, called the collapse. The size of the collapse is used as a uniform importance measure for the curve and surface skeleton, so that both can be simplified by imposing a single threshold on this intuitive measure. The simplified skeletons are connected by default, without special precautions, due to the monotonicity of the importance measure. The skeletons possess additional desirable properties: They are centered, robust to noise, hierarchical, and provide a natural skeleton-to-boundary mapping. We present a voxel-based algorithm that is straightforward to implement and simple to use. 
We illustrate our method on several realistic 3D objects.", "title": "Computing Multiscale Curve and Surface Skeletons of Genus 0 Shapes Using a Global Importance Measure", "normalizedTitle": "Computing Multiscale Curve and Surface Skeletons of Genus 0 Shapes Using a Global Importance Measure", "fno": "ttg2008020355", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Geometry And Object Modeling" ], "authors": [ { "givenName": "Dennie", "surname": "Reniers", "fullName": "Dennie Reniers", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Jarke", "surname": "van Wijk", "fullName": "Jarke van Wijk", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Alexandru", "surname": "Telea", "fullName": "Alexandru Telea", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "355-368", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2008/2339/0/04563018", "title": "Anisotropic Laplace-Beltrami eigenmaps: Bridging Reeb graphs and skeletons", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2008/04563018/12OmNBO3K32", "parentPublication": { "id": "proceedings/cvprw/2008/2339/0", "title": "2008 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smi/2010/7259/0/05521461", "title": "Point Cloud Skeletons via Laplacian Based Contraction", "doi": null, "abstractUrl": "/proceedings-article/smi/2010/05521461/12OmNCvLXYS", "parentPublication": { "id": "proceedings/smi/2010/7259/0", "title": "Shape Modeling International (SMI 2010)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icpr/1994/6265/1/00576381", "title": "Using polyballs to approximate shapes and skeletons", "doi": null, "abstractUrl": "/proceedings-article/icpr/1994/00576381/12OmNwDACpP", "parentPublication": { "id": "proceedings/icpr/1994/6265/1", "title": "Proceedings of 12th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1992/2855/0/00223226", "title": "Voronoi skeletons: theory and applications", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1992/00223226/12OmNxH9Xdu", "parentPublication": { "id": "proceedings/cvpr/1992/2855/0", "title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssiai/2006/0069/0/01633715", "title": "On the Properties of Morphological Skeletons of Discrete Binary Image Using Double Structuring Elements", "doi": null, "abstractUrl": "/proceedings-article/ssiai/2006/01633715/12OmNy2rRWH", "parentPublication": { "id": "proceedings/ssiai/2006/0069/0", "title": "7th IEEE Southwest Symposium on Image Analysis and Interpretation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543279", "title": "Straight skeletons for binary shapes", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543279/12OmNyUnEJa", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2001/11/i1296", "title": "Hierarchical Decomposition of Multiscale Skeletons", "doi": null, "abstractUrl": "/journal/tp/2001/11/i1296/13rRUILc8g5", "parentPublication": { "id": "trans/tp", "title": "IEEE 
Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2008/12/ttp2008122204", "title": "Euclidean Skeletons of Digital Image and Volume Data in Linear Time by the Integer Medial Axis Transform", "doi": null, "abstractUrl": "/journal/tp/2008/12/ttp2008122204/13rRUx0PqqE", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2016/01/07066924", "title": "An Unified Multiscale Framework for Planar, Surface, and Curve Skeletonization", "doi": null, "abstractUrl": "/journal/tp/2016/01/07066924/13rRUxASuNO", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1987/04/04767937", "title": "Multiple Resolution Skeletons", "doi": null, "abstractUrl": "/journal/tp/1987/04/04767937/13rRUxlgy4A", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "04435117", "articleId": "13rRUwh80H6", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020369", "articleId": "13rRUxBJhvn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxBJhvn", "doi": "10.1109/TVCG.2007.70430", "abstract": "In this paper, we propose a generic framework for 3D surface remeshing. Based on a metric-driven Discrete Voronoi Diagram construction, our output is an optimized 3D triangular mesh with a user defined vertex budget. Our approach can deal with a wide range of applications, from high quality mesh generation to shape approximation. By using appropriate metric constraints the method generates isotropic or anisotropic elements. Based on point-sampling, our algorithm combines the robustness and theoretical strength of Delaunay criteria with the efficiency of entirely discrete geometry processing . Besides the general described framework, we show experimental results using isotropic, quadric-enhanced isotropic and anisotropic metrics which prove the efficiency of our method on large meshes, for a low computational cost.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a generic framework for 3D surface remeshing. Based on a metric-driven Discrete Voronoi Diagram construction, our output is an optimized 3D triangular mesh with a user defined vertex budget. Our approach can deal with a wide range of applications, from high quality mesh generation to shape approximation. By using appropriate metric constraints the method generates isotropic or anisotropic elements. Based on point-sampling, our algorithm combines the robustness and theoretical strength of Delaunay criteria with the efficiency of entirely discrete geometry processing . 
Besides the general described framework, we show experimental results using isotropic, quadric-enhanced isotropic and anisotropic metrics which prove the efficiency of our method on large meshes, for a low computational cost.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a generic framework for 3D surface remeshing. Based on a metric-driven Discrete Voronoi Diagram construction, our output is an optimized 3D triangular mesh with a user defined vertex budget. Our approach can deal with a wide range of applications, from high quality mesh generation to shape approximation. By using appropriate metric constraints the method generates isotropic or anisotropic elements. Based on point-sampling, our algorithm combines the robustness and theoretical strength of Delaunay criteria with the efficiency of entirely discrete geometry processing . Besides the general described framework, we show experimental results using isotropic, quadric-enhanced isotropic and anisotropic metrics which prove the efficiency of our method on large meshes, for a low computational cost.", "title": "Generic Remeshing of 3D Triangular Meshes with Metric-Dependent Discrete Voronoi Diagrams", "normalizedTitle": "Generic Remeshing of 3D Triangular Meshes with Metric-Dependent Discrete Voronoi Diagrams", "fno": "ttg2008020369", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Approximation Of Surfaces And Contours", "Global Optimization", "Hierarchy And Geometric Transformations" ], "authors": [ { "givenName": "Sebastien", "surname": "Valette", "fullName": "Sebastien Valette", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Jean Marc", "surname": "Chassery", "fullName": "Jean Marc Chassery", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Rémy", "surname": "Prost", "fullName": "Rémy Prost", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, 
"showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "369-381", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iscv/2015/7511/0/07106191", "title": "Adaptive kernel for triangular meshes smoothing", "doi": null, "abstractUrl": "/proceedings-article/iscv/2015/07106191/12OmNBhZ4fF", "parentPublication": { "id": "proceedings/iscv/2015/7511/0", "title": "2015 Intelligent Systems and Computer Vision (ISCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2004/2234/0/22340207", "title": "Direct Anisotropic Quad-Dominant Remeshing", "doi": null, "abstractUrl": "/proceedings-article/pg/2004/22340207/12OmNCfjerO", "parentPublication": { "id": "proceedings/pg/2004/2234/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmp/2000/0562/0/05620220", "title": "Using Most Isometric Parametrizations for Remeshing Polygonal Surfaces", "doi": null, "abstractUrl": "/proceedings-article/gmp/2000/05620220/12OmNwl8GJx", "parentPublication": { "id": "proceedings/gmp/2000/0562/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmp/2004/2078/0/20780153", "title": "Triangular/Quadrilateral Remeshing of an Arbitrary Polygonal Surface via Packing Bubbles", "doi": null, "abstractUrl": "/proceedings-article/gmp/2004/20780153/12OmNzyGH6F", "parentPublication": { "id": "proceedings/gmp/2004/2078/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/10/06832586", "title": "Low-Resolution Remeshing Using the Localized Restricted 
Voronoi Diagram", "doi": null, "abstractUrl": "/journal/tg/2014/10/06832586/13rRUIM2VBI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/07/08361045", "title": "Isotropic Surface Remeshing without Large and Small Angles", "doi": null, "abstractUrl": "/journal/tg/2019/07/08361045/13rRUIM2VBN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2000/02/mcg2000020062", "title": "Metamorphosis of Arbitrary Triangular Meshes", "doi": null, "abstractUrl": "/magazine/cg/2000/02/mcg2000020062/13rRUNvyanl", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/08/ttp2011081502", "title": "Construction of Iso-Contours, Bisectors, and Voronoi Diagrams on Triangulated Surfaces", "doi": null, "abstractUrl": "/journal/tp/2011/08/ttp2011081502/13rRUxNmPF4", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/02/ttg2008020342", "title": "Anisotropic Noise Samples", "doi": null, "abstractUrl": "/journal/tg/2008/02/ttg2008020342/13rRUxcsYLG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09978684", "title": "Adaptively Isotropic Remeshing based on Curvature Smoothed Field", "doi": null, "abstractUrl": "/journal/tg/5555/01/09978684/1IXUnEM2oc8", "parentPublication": { "id": "trans/tg", "title": "IEEE 
Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020355", "articleId": "13rRUwd9CFZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020382", "articleId": "13rRUyv53Fh", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyv53Fh", "doi": "10.1109/TVCG.2007.70432", "abstract": "Texture mapping with positional constraints is an important and challenging problem in computer graphics. In this paper, we first present a theoretically robust, foldover-free 2D mesh warping algorithm. Then we apply this warping algorithm to handle mapping texture onto 3D meshes with hard constraints. The proposed algorithm is experimentally evaluated and compared with the state-of-the-art method for examples with more challenging constraints. These challenging constraints may lead to large distortions and foldovers. Experimental results show that the proposed scheme can generate more pleasing results and add fewer Steiner vertices on the 3D mesh embedding.", "abstracts": [ { "abstractType": "Regular", "content": "Texture mapping with positional constraints is an important and challenging problem in computer graphics. In this paper, we first present a theoretically robust, foldover-free 2D mesh warping algorithm. Then we apply this warping algorithm to handle mapping texture onto 3D meshes with hard constraints. The proposed algorithm is experimentally evaluated and compared with the state-of-the-art method for examples with more challenging constraints. These challenging constraints may lead to large distortions and foldovers. Experimental results show that the proposed scheme can generate more pleasing results and add fewer Steiner vertices on the 3D mesh embedding.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Texture mapping with positional constraints is an important and challenging problem in computer graphics. 
In this paper, we first present a theoretically robust, foldover-free 2D mesh warping algorithm. Then we apply this warping algorithm to handle mapping texture onto 3D meshes with hard constraints. The proposed algorithm is experimentally evaluated and compared with the state-of-the-art method for examples with more challenging constraints. These challenging constraints may lead to large distortions and foldovers. Experimental results show that the proposed scheme can generate more pleasing results and add fewer Steiner vertices on the 3D mesh embedding.", "title": "Texture Mapping with Hard Constraints Using Warping Scheme", "normalizedTitle": "Texture Mapping with Hard Constraints Using Warping Scheme", "fno": "ttg2008020382", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Texture Mapping", "Positional Constraints", "Warping", "Foldover" ], "authors": [ { "givenName": "Tong-Yee", "surname": "Lee", "fullName": "Tong-Yee Lee", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Shao-Wei", "surname": "Yen", "fullName": "Shao-Wei Yen", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "I-Cheng", "surname": "Yeh", "fullName": "I-Cheng Yeh", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "382-395", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/1999/5897/0/58970014", "title": "Forward Image Mapping", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970014/12OmNC8Mst9", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iccis/2012/4789/0/4789b147", "title": "Application of Compounded Texture Mapping to Ceramic Product Design", "doi": null, "abstractUrl": "/proceedings-article/iccis/2012/4789b147/12OmNqJ8tfc", "parentPublication": { "id": "proceedings/iccis/2012/4789/0", "title": "2012 Fourth International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109b820", "title": "Super-Resolution Texture Mapping from Multiple View Images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109b820/12OmNwFzNZF", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnc/2009/3736/6/3736f008", "title": "Automatic Facial Image Manipulation System and Facial Texture Analysis", "doi": null, "abstractUrl": "/proceedings-article/icnc/2009/3736f008/12OmNx38vOS", "parentPublication": { "id": "proceedings/icnc/2009/3736/6", "title": "2009 Fifth International Conference on Natural Computation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031c343", "title": "The Foldover-free Condition of Locally Constrained Image Warping", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031c343/12OmNxX3uF1", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciap/1999/0040/0/00401055", "title": "Texture Extraction from Photographs and Rendering with Dynamic Texture Mapping", "doi": null, "abstractUrl": "/proceedings-article/iciap/1999/00401055/12OmNz61drx", "parentPublication": { "id": "proceedings/iciap/1999/0040/0", "title": "Image 
Analysis and Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/03/06942245", "title": "Foldover-Free Mesh Warping for Constrained Texture Mapping", "doi": null, "abstractUrl": "/journal/tg/2015/03/06942245/13rRUwInvB9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/07/ttg2012071115", "title": "An RBF-Based Reparameterization Method for Constrained Texture Mapping", "doi": null, "abstractUrl": "/journal/tg/2012/07/ttg2012071115/13rRUxC0SOV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1986/11/mcg1986110056", "title": "Survey of Texture Mapping", "doi": null, "abstractUrl": "/magazine/cg/1986/11/mcg1986110056/13rRUxYINas", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2005/04/mcg2005040066", "title": "Geometric Texture Modeling", "doi": null, "abstractUrl": "/magazine/cg/2005/04/mcg2005040066/13rRUyZaxsV", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020369", "articleId": "13rRUxBJhvn", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020396", "articleId": "13rRUx0xPZt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0xPZt", "doi": "10.1109/TVCG.2007.70436", "abstract": "This paper describes the integration of perceptual guidelines from human vision with an AI-based mixed-initiative search strategy. The result is a visualization assistant called ViA, a system that collaborates with its users to identify perceptually salient visualizations for large, multidimensional datasets. ViA applies knowledge of low-level human vision to: (1) evaluate the effectiveness of a particular visualization for a given dataset and analysis tasks; and (2) rapidly direct its search towards new visualizations that are most likely to offer improvements over those seen to date. Context, domain expertise, and a high-level understanding of a dataset are critical to identifying effective visualizations. We apply a mixed-initiative strategy that allows ViA and its users to share their different strengths and continually improve ViA's understanding of a user's preferences. We visualize historical weather conditions to compare ViA's search strategy to exhaustive analysis, simulated annealing, and reactive tabu search, and to measure the improvement provided by mixed-initiative interaction. We also visualize intelligent agents competing in a simulated online auction to evaluate ViA's perceptual guidelines. Results from each study are positive, suggesting that ViA can construct high-quality visualizations for a range of real-world datasets.", "abstracts": [ { "abstractType": "Regular", "content": "This paper describes the integration of perceptual guidelines from human vision with an AI-based mixed-initiative search strategy. 
The result is a visualization assistant called ViA, a system that collaborates with its users to identify perceptually salient visualizations for large, multidimensional datasets. ViA applies knowledge of low-level human vision to: (1) evaluate the effectiveness of a particular visualization for a given dataset and analysis tasks; and (2) rapidly direct its search towards new visualizations that are most likely to offer improvements over those seen to date. Context, domain expertise, and a high-level understanding of a dataset are critical to identifying effective visualizations. We apply a mixed-initiative strategy that allows ViA and its users to share their different strengths and continually improve ViA's understanding of a user's preferences. We visualize historical weather conditions to compare ViA's search strategy to exhaustive analysis, simulated annealing, and reactive tabu search, and to measure the improvement provided by mixed-initiative interaction. We also visualize intelligent agents competing in a simulated online auction to evaluate ViA's perceptual guidelines. Results from each study are positive, suggesting that ViA can construct high-quality visualizations for a range of real-world datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper describes the integration of perceptual guidelines from human vision with an AI-based mixed-initiative search strategy. The result is a visualization assistant called ViA, a system that collaborates with its users to identify perceptually salient visualizations for large, multidimensional datasets. ViA applies knowledge of low-level human vision to: (1) evaluate the effectiveness of a particular visualization for a given dataset and analysis tasks; and (2) rapidly direct its search towards new visualizations that are most likely to offer improvements over those seen to date. 
Context, domain expertise, and a high-level understanding of a dataset are critical to identifying effective visualizations. We apply a mixed-initiative strategy that allows ViA and its users to share their different strengths and continually improve ViA's understanding of a user's preferences. We visualize historical weather conditions to compare ViA's search strategy to exhaustive analysis, simulated annealing, and reactive tabu search, and to measure the improvement provided by mixed-initiative interaction. We also visualize intelligent agents competing in a simulated online auction to evaluate ViA's perceptual guidelines. Results from each study are positive, suggesting that ViA can construct high-quality visualizations for a range of real-world datasets.", "title": "Visual Perception and Mixed-Initiative Interaction for Assisted Visualization Design", "normalizedTitle": "Visual Perception and Mixed-Initiative Interaction for Assisted Visualization Design", "fno": "ttg2008020396", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Multivariate Visualization", "Display Algorithms", "Interaction Techniques", "Human Information Processing" ], "authors": [ { "givenName": "Christopher", "surname": "Healey", "fullName": "Christopher Healey", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Sarat", "surname": "Kocherlakota", "fullName": "Sarat Kocherlakota", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Vivek", "surname": "Rao", "fullName": "Vivek Rao", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Reshma", "surname": "Mehta", "fullName": "Reshma Mehta", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Robert", "surname": "St. Amant", "fullName": "Robert St. 
Amant", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "396-411", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/scc/2007/2925/0/29250443", "title": "A Formal Model for Mixed Initiative Service Composition", "doi": null, "abstractUrl": "/proceedings-article/scc/2007/29250443/12OmNB6UId1", "parentPublication": { "id": "proceedings/scc/2007/2925/0", "title": "IEEE International Conference on Services Computing (SCC 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2009/3791/0/3791a292", "title": "Comparison of Adaptive, Adaptable and Mixed-Initiative Menus", "doi": null, "abstractUrl": "/proceedings-article/cw/2009/3791a292/12OmNBBzoem", "parentPublication": { "id": "proceedings/cw/2009/3791/0", "title": "2009 International Conference on CyberWorlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2003/1874/5/187450127a", "title": "Mixed-Initiative Control for Remote Characterization of Hazardous Environments", "doi": null, "abstractUrl": "/proceedings-article/hicss/2003/187450127a/12OmNvq5jAb", "parentPublication": { "id": "proceedings/hicss/2003/1874/5", "title": "36th Annual Hawaii International Conference on System Sciences, 2003. 
Proceedings of the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ozchi/1998/9206/0/92060338", "title": "Mixed Initiative in Design Space Exploration", "doi": null, "abstractUrl": "/proceedings-article/ozchi/1998/92060338/12OmNvq5jAv", "parentPublication": { "id": "proceedings/ozchi/1998/9206/0", "title": "Computer-Human Interaction, Australasian Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/citworkshops/2008/3242/0/3242a098", "title": "Robot with Emotion for Triggering Mixed-Initiative Interaction Planning", "doi": null, "abstractUrl": "/proceedings-article/citworkshops/2008/3242a098/12OmNwMob8I", "parentPublication": { "id": "proceedings/citworkshops/2008/3242/0", "title": "Computer and Information Technology, IEEE 8th International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2015/9783/0/07347625", "title": "Mixed-initiative visual analytics using task-driven recommendations", "doi": null, "abstractUrl": "/proceedings-article/vast/2015/07347625/12OmNwc3wtQ", "parentPublication": { "id": "proceedings/vast/2015/9783/0", "title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/caia/1989/1902/0/00049143", "title": "Extending CATS: mixed-initiative inferencing", "doi": null, "abstractUrl": "/proceedings-article/caia/1989/00049143/12OmNxIzWPs", "parentPublication": { "id": "proceedings/caia/1989/1902/0", "title": "Proceedings The Fifth Conference on Artificial Intelligence Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2016/5670/0/5670b427", "title": "Mixed-Initiative for Big Data: The Intersection of Human + Visual Analytics + Prediction", "doi": null, "abstractUrl": 
"/proceedings-article/hicss/2016/5670b427/12OmNzVGcUy", "parentPublication": { "id": "proceedings/hicss/2016/5670/0", "title": "2016 49th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090501", "title": "Exploring a Mixed Reality Framework for the Internet-of-Things: Toward Visualization and Interaction with Hybrid Objects and Avatars", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090501/1jIxiuC3FII", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2021/3931/0/393100a111", "title": "Mixed-Initiative Approach to Extract Data from Pictures of Medical Invoice", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2021/393100a111/1tTtrLpcoJG", "parentPublication": { "id": "proceedings/pacificvis/2021/3931/0", "title": "2021 IEEE 14th Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020382", "articleId": "13rRUyv53Fh", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020412", "articleId": "13rRUxD9h50", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxD9h50", "doi": "10.1109/TVCG.2007.70439", "abstract": "The apparent reflectance of a surface depends upon the resolution at which it is imaged. Conventional reflectance models represent reflection at a single predetermined resolution; however, a low-resolution pixel that views a greater surface area often exhibits a reflectance more complicated than a high-resolution pixel with a smaller area. To address resolution dependency in reflectance, we utilize a generalized reflectance model based on a mixture of multiple conventional models, and present a framework for efficiently determining the reflectance mixture model of each pixel with respect to resolution. Mixture model parameters are precomputed at multiple resolutions and stored in mipmaps. Unlike color textures, these reflectance parameters cannot be accurately filtered by trilinear interpolation, so we present a technique for nonlinear mipmap filtering that minimizes aliasing in rendered results. This framework can be applied with various parametric reflectance models in graphics hardware for real-time processing. With this technique for filtering and rendering with mipmaps of reflectance mixture models, our system can rapidly render the resolution-dependent reflectance effects that are customarily disregarded in conventional rendering methods. At the end of this paper, we also describe how shadowing and masking effects can be incorporated into this framework to increase the realism of rendering.", "abstracts": [ { "abstractType": "Regular", "content": "The apparent reflectance of a surface depends upon the resolution at which it is imaged. 
Conventional reflectance models represent reflection at a single predetermined resolution; however, a low-resolution pixel that views a greater surface area often exhibits a reflectance more complicated than a high-resolution pixel with a smaller area. To address resolution dependency in reflectance, we utilize a generalized reflectance model based on a mixture of multiple conventional models, and present a framework for efficiently determining the reflectance mixture model of each pixel with respect to resolution. Mixture model parameters are precomputed at multiple resolutions and stored in mipmaps. Unlike color textures, these reflectance parameters cannot be accurately filtered by trilinear interpolation, so we present a technique for nonlinear mipmap filtering that minimizes aliasing in rendered results. This framework can be applied with various parametric reflectance models in graphics hardware for real-time processing. With this technique for filtering and rendering with mipmaps of reflectance mixture models, our system can rapidly render the resolution-dependent reflectance effects that are customarily disregarded in conventional rendering methods. At the end of this paper, we also describe how shadowing and masking effects can be incorporated into this framework to increase the realism of rendering.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The apparent reflectance of a surface depends upon the resolution at which it is imaged. Conventional reflectance models represent reflection at a single predetermined resolution; however, a low-resolution pixel that views a greater surface area often exhibits a reflectance more complicated than a high-resolution pixel with a smaller area. 
To address resolution dependency in reflectance, we utilize a generalized reflectance model based on a mixture of multiple conventional models, and present a framework for efficiently determining the reflectance mixture model of each pixel with respect to resolution. Mixture model parameters are precomputed at multiple resolutions and stored in mipmaps. Unlike color textures, these reflectance parameters cannot be accurately filtered by trilinear interpolation, so we present a technique for nonlinear mipmap filtering that minimizes aliasing in rendered results. This framework can be applied with various parametric reflectance models in graphics hardware for real-time processing. With this technique for filtering and rendering with mipmaps of reflectance mixture models, our system can rapidly render the resolution-dependent reflectance effects that are customarily disregarded in conventional rendering methods. At the end of this paper, we also describe how shadowing and masking effects can be incorporated into this framework to increase the realism of rendering.", "title": "Filtering and Rendering of Resolution-Dependent Reflectance Models", "normalizedTitle": "Filtering and Rendering of Resolution-Dependent Reflectance Models", "fno": "ttg2008020412", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Antialiasing", "Color", "Shading", "Shadowing", "And Texture", "Reflectance" ], "authors": [ { "givenName": "Ping", "surname": "Tan", "fullName": "Ping Tan", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Stephen", "surname": "Lin", "fullName": "Stephen Lin", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Long", "surname": "Quan", "fullName": "Long Quan", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Baining", "surname": "Guo", "fullName": "Baining Guo", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Harry", "surname": "Shum", "fullName": "Harry Shum", 
"affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "412-425", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ssiai/2000/0595/0/05950146", "title": "Characterization of Skin Lesion Texture in Diffuse Reflectance Spectroscopic Images", "doi": null, "abstractUrl": "/proceedings-article/ssiai/2000/05950146/12OmNy7yEjE", "parentPublication": { "id": "proceedings/ssiai/2000/0595/0", "title": "Image Analysis and Interpretation, IEEE Southwest Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2008/08/ttp2008081460", "title": "Subpixel Photometric Stereo", "doi": null, "abstractUrl": "/journal/tp/2008/08/ttp2008081460/13rRUEgs2N4", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2001/04/v0318", "title": "High-Quality Texture Reconstruction from Multiple Scans", "doi": null, "abstractUrl": "/journal/tg/2001/04/v0318/13rRUwh80GZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2008/01/mcg2008010034", "title": "Subsurface Texture Mapping", "doi": null, "abstractUrl": "/magazine/cg/2008/01/mcg2008010034/13rRUx0xPOa", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030539", "title": "Antialiasing Procedural Shaders with Reduction Maps", "doi": null, 
"abstractUrl": "/journal/tg/2008/03/ttg2008030539/13rRUx0xPmU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1997/04/v0329", "title": "A Wavelet Representation of Reflectance Functions", "doi": null, "abstractUrl": "/journal/tg/1997/04/v0329/13rRUxBJhmG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/02/ttg2012020242", "title": "A Survey of Nonlinear Prefiltering Methods for Efficient and Accurate Surface Shading", "doi": null, "abstractUrl": "/journal/tg/2012/02/ttg2012020242/13rRUxBa5bU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/02/v0126", "title": "Barycentric Parameterizations for Isotropic BRDFs", "doi": null, "abstractUrl": "/journal/tg/2005/02/v0126/13rRUxNEqPC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2004/03/v0278", "title": "Synthesis and Rendering of Bidirectional Texture Functions on Arbitrary Surfaces", "doi": null, "abstractUrl": "/journal/tg/2004/03/v0278/13rRUyYSWsE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/03/v0296", "title": "Reflectance from Images: A Model-Based Approach for Human Faces", "doi": null, "abstractUrl": "/journal/tg/2005/03/v0296/13rRUyYSWsG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on 
Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020396", "articleId": "13rRUx0xPZt", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020426", "articleId": "13rRUIJcWle", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUIJcWle", "doi": "10.1109/TVCG.2007.70438", "abstract": "This paper presents an approach of replacing textures of specified regions in the input image/video with new ones. The replacement results have the similar distortion and shading effects conforming to the underlying geometry and lighting conditions. For replacing textures in single image, the approach consists of two steps. First, a stretchbased mesh parametrization incorporating the recovered normal information is deduced to imitate perspective distortion of the interest region. Second, a Poisson-based refinement process is exploited to account for texture distortion at fine scale. Our approach is independent of the replaced textures. Once processing the input image is completed, any new texture can be applied efficiently. For dealing with video sequence, one key-frame based texture replacement approach is devised. The approach is generalized from image retexturing. It repeatedly propagates the replacement results of key frames to the rest ones. We develop a local motion optimization scheme to deal with the inaccuracies of optical flow when tracking moving objects. One graphcut segmentation algorithm is incorporated into the approach for handling visibility shifting. Texture drifting is alleviated with one globally optimization to smooth trajectories of the tracked points over temporal domain. Experimental results show that our approach can generate visually pleasing results for both image and video.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents an approach of replacing textures of specified regions in the input image/video with new ones. 
The replacement results have the similar distortion and shading effects conforming to the underlying geometry and lighting conditions. For replacing textures in single image, the approach consists of two steps. First, a stretchbased mesh parametrization incorporating the recovered normal information is deduced to imitate perspective distortion of the interest region. Second, a Poisson-based refinement process is exploited to account for texture distortion at fine scale. Our approach is independent of the replaced textures. Once processing the input image is completed, any new texture can be applied efficiently. For dealing with video sequence, one key-frame based texture replacement approach is devised. The approach is generalized from image retexturing. It repeatedly propagates the replacement results of key frames to the rest ones. We develop a local motion optimization scheme to deal with the inaccuracies of optical flow when tracking moving objects. One graphcut segmentation algorithm is incorporated into the approach for handling visibility shifting. Texture drifting is alleviated with one globally optimization to smooth trajectories of the tracked points over temporal domain. Experimental results show that our approach can generate visually pleasing results for both image and video.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents an approach of replacing textures of specified regions in the input image/video with new ones. The replacement results have the similar distortion and shading effects conforming to the underlying geometry and lighting conditions. For replacing textures in single image, the approach consists of two steps. First, a stretchbased mesh parametrization incorporating the recovered normal information is deduced to imitate perspective distortion of the interest region. Second, a Poisson-based refinement process is exploited to account for texture distortion at fine scale. 
Our approach is independent of the replaced textures. Once processing the input image is completed, any new texture can be applied efficiently. For dealing with video sequence, one key-frame based texture replacement approach is devised. The approach is generalized from image retexturing. It repeatedly propagates the replacement results of key frames to the rest ones. We develop a local motion optimization scheme to deal with the inaccuracies of optical flow when tracking moving objects. One graphcut segmentation algorithm is incorporated into the approach for handling visibility shifting. Texture drifting is alleviated with one globally optimization to smooth trajectories of the tracked points over temporal domain. Experimental results show that our approach can generate visually pleasing results for both image and video.", "title": "Mesh-Guided Optimized Retexturing for Image and Video", "normalizedTitle": "Mesh-Guided Optimized Retexturing for Image and Video", "fno": "ttg2008020426", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Picture Image Generation", "Color", "Shading", "Shadowing", "And Texture", "Image Processing And Computer Vision" ], "authors": [ { "givenName": "Yanwen", "surname": "Guo", "fullName": "Yanwen Guo", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Hanqiu", "surname": "Sun", "fullName": "Hanqiu Sun", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Qunsheng", "surname": "Peng", "fullName": "Qunsheng Peng", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Zhongding", "surname": "Jiang", "fullName": "Zhongding Jiang", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "426-439", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": 
"ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icig/2011/4541/0/4541a117", "title": "Optimized-SSIM Based Quantization in Optical Remote Sensing Image Compression", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a117/12OmNBJeyHZ", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2004/8484/3/01326579", "title": "Merging MPEG-7 descriptors for image content analysis", "doi": null, "abstractUrl": "/proceedings-article/icassp/2004/01326579/12OmNviHKkY", "parentPublication": { "id": "proceedings/icassp/2004/8484/3", "title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2015/6964/0/07298817", "title": "Texture representations for image and video synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2015/07298817/12OmNvoFjUu", "parentPublication": { "id": "proceedings/cvpr/2015/6964/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2001/7200/0/7200dong", "title": "Volume Rendering of Fine Details Within Medical Data", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2001/7200dong/12OmNx6xHlc", "parentPublication": { "id": "proceedings/ieee-vis/2001/7200/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciap/1999/0040/0/00401055", "title": "Texture Extraction from Photographs and Rendering with Dynamic Texture Mapping", "doi": null, "abstractUrl": "/proceedings-article/iciap/1999/00401055/12OmNz61drx", "parentPublication": { "id": "proceedings/iciap/1999/0040/0", 
"title": "Image Analysis and Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1998/9176/0/91760343", "title": "Interactive Display of Very Large Textures", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1998/91760343/12OmNzICERi", "parentPublication": { "id": "proceedings/ieee-vis/1998/9176/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1998/9176/0/91760059", "title": "A General Method for Preserving Attribute Values on Simplified Meshes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1998/91760059/12OmNzUxOgJ", "parentPublication": { "id": "proceedings/ieee-vis/1998/9176/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030539", "title": "Antialiasing Procedural Shaders with Reduction Maps", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030539/13rRUx0xPmU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/02/ttg2008020412", "title": "Filtering and Rendering of Resolution-Dependent Reflectance Models", "doi": null, "abstractUrl": "/journal/tg/2008/02/ttg2008020412/13rRUxD9h50", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/02/v0126", "title": "Barycentric Parameterizations for Isotropic BRDFs", "doi": null, "abstractUrl": "/journal/tg/2005/02/v0126/13rRUxNEqPC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020412", "articleId": "13rRUxD9h50", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020440", "articleId": "13rRUNvgz49", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvgz49", "doi": "10.1109/TVCG.2007.70441", "abstract": "In this paper, we propose a generic point cloud encoder that provides a unified framework for compressing different attributes of point samples corresponding to 3D objects with arbitrary topology. In the proposed scheme, the coding process is led by an iterative octree cell subdivision of the object space. At each level of subdivision, positions of point samples are approximated by the geometry centers of all tree-front cells while normals and colors are approximated by their statistical average within each of tree-front cells. With this framework, we employ attribute-dependent encoding techniques to exploit different characteristics of various attributes. All of these have led to significant improvement in the rate-distortion (R-D) performance and a computational advantage over the state of the art. Furthermore, given sufficient levels of octree expansion, normal space partitioning and resolution of color quantization, the proposed point cloud encoder can be potentially used for lossless coding of 3D point clouds.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a generic point cloud encoder that provides a unified framework for compressing different attributes of point samples corresponding to 3D objects with arbitrary topology. In the proposed scheme, the coding process is led by an iterative octree cell subdivision of the object space. 
At each level of subdivision, positions of point samples are approximated by the geometry centers of all tree-front cells while normals and colors are approximated by their statistical average within each of tree-front cells. With this framework, we employ attribute-dependent encoding techniques to exploit different characteristics of various attributes. All of these have led to significant improvement in the rate-distortion (R-D) performance and a computational advantage over the state of the art. Furthermore, given sufficient levels of octree expansion, normal space partitioning and resolution of color quantization, the proposed point cloud encoder can be potentially used for lossless coding of 3D point clouds.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a generic point cloud encoder that provides a unified framework for compressing different attributes of point samples corresponding to 3D objects with arbitrary topology. In the proposed scheme, the coding process is led by an iterative octree cell subdivision of the object space. At each level of subdivision, positions of point samples are approximated by the geometry centers of all tree-front cells while normals and colors are approximated by their statistical average within each of tree-front cells. With this framework, we employ attribute-dependent encoding techniques to exploit different characteristics of various attributes. All of these have led to significant improvement in the rate-distortion (R-D) performance and a computational advantage over the state of the art. 
Furthermore, given sufficient levels of octree expansion, normal space partitioning and resolution of color quantization, the proposed point cloud encoder can be potentially used for lossless coding of 3D point clouds.", "title": "A Generic Scheme for Progressive Point Cloud Coding", "normalizedTitle": "A Generic Scheme for Progressive Point Cloud Coding", "fno": "ttg2008020440", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Compaction And Compression", "Indexing Methods", "Object Hierarchies" ], "authors": [ { "givenName": "Yan", "surname": "Huang", "fullName": "Yan Huang", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Jingliang", "surname": "Peng", "fullName": "Jingliang Peng", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "C.-C. Jay", "surname": "Kuo", "fullName": "C.-C. Jay Kuo", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Gopi", "fullName": "M. Gopi", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "440-453", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icme/2004/8603/1/01394110", "title": "Progressive geometry encoder using octree-based space partitioning", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394110/12OmNBbsifT", "parentPublication": { "id": "proceedings/icme/2004/8603/1", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2012/4656/0/4656a398", "title": "Efficient Progressive Compression of 3D Points by Maximizing Tangent-Plane Continuity", "doi": null, "abstractUrl": 
"/proceedings-article/dcc/2012/4656a398/12OmNxymo98", "parentPublication": { "id": "proceedings/dcc/2012/4656/0", "title": "Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2006/2686/0/26860229", "title": "Point set compression through BSP quantization", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2006/26860229/12OmNzFdt7I", "parentPublication": { "id": "proceedings/sibgrapi/2006/2686/0", "title": "2006 19th Brazilian Symposium on Computer Graphics and Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/02/ttg2012020188", "title": "Visualizing Nonmanifold and Singular Implicit Surfaces with Point Clouds", "doi": null, "abstractUrl": "/journal/tg/2012/02/ttg2012020188/13rRUwd9CLK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486481", "title": "Scalable Point Cloud Geometry Coding with Binary Tree Embedded Quadtree", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486481/14jQfO9lbCW", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2021/3902/0/09671659", "title": "Point cloud indexing using Big Data technologies", "doi": null, "abstractUrl": "/proceedings-article/big-data/2021/09671659/1A8gy7yb7iw", "parentPublication": { "id": "proceedings/big-data/2021/3902/0", "title": "2021 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200g393", "title": "Progressive Seed Generation 
Auto-encoder for Unsupervised Point Cloud Learning", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200g393/1BmGLyAUHug", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2022/9548/0/954800a096", "title": "Learning to Predict on Octree for Scalable Point Cloud Geometry Coding", "doi": null, "abstractUrl": "/proceedings-article/mipr/2022/954800a096/1Gvddm9kzTO", "parentPublication": { "id": "proceedings/mipr/2022/9548/0", "title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300b009", "title": "3D Point Capsule Networks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300b009/1gyruqGAGti", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102866", "title": "Lossy Geometry Compression Of 3d Point Cloud Data Via An Adaptive Octree-Guided Network", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102866/1kwr58J4kWQ", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020426", "articleId": "13rRUIJcWle", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2008020454", "articleId": "13rRUwvT9gl", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXloT", "title": "March/April", "year": "2008", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "14", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwvT9gl", "doi": "10.1109/TVCG.2007.70442", "abstract": "This paper presents a novel basis function, called spherical piecewise constant basis function (SPCBF), for precomputed radiance transfer. SPCBFs have several desirable properties: rotatability, ability to represent all-frequency signals, and support for efficient multiple product. By smartly partitioning the illumination sphere into a set of subregions, and associating each subregion with an SPCBF valued 1 inside the region and 0 elsewhere, we precompute the light coefficients using the resulting SPCBFs. Efficient rotation of the light representation in SPCBFs is achieved by rotating the domain of SPCBFs. We run-time approximate the BRDF and visibility coefficients using the set of SPCBFs for light, possibly rotated, through fast lookup of summed-area-table (SAT) and visibility distance table (VDT), respectively. SPCBFs enable new effects such as object rotation in all-frequency rendering of dynamic scenes and on-the-fly BRDF editing under rotating environment lighting. With graphics hardware acceleration, our method achieves real-time frame rates.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a novel basis function, called spherical piecewise constant basis function (SPCBF), for precomputed radiance transfer. SPCBFs have several desirable properties: rotatability, ability to represent all-frequency signals, and support for efficient multiple product. 
By smartly partitioning the illumination sphere into a set of subregions, and associating each subregion with an SPCBF valued 1 inside the region and 0 elsewhere, we precompute the light coefficients using the resulting SPCBFs. Efficient rotation of the light representation in SPCBFs is achieved by rotating the domain of SPCBFs. We run-time approximate the BRDF and visibility coefficients using the set of SPCBFs for light, possibly rotated, through fast lookup of summed-area-table (SAT) and visibility distance table (VDT), respectively. SPCBFs enable new effects such as object rotation in all-frequency rendering of dynamic scenes and on-the-fly BRDF editing under rotating environment lighting. With graphics hardware acceleration, our method achieves real-time frame rates.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a novel basis function, called spherical piecewise constant basis function (SPCBF), for precomputed radiance transfer. SPCBFs have several desirable properties: rotatability, ability to represent all-frequency signals, and support for efficient multiple product. By smartly partitioning the illumination sphere into a set of subregions, and associating each subregion with an SPCBF valued 1 inside the region and 0 elsewhere, we precompute the light coefficients using the resulting SPCBFs. Efficient rotation of the light representation in SPCBFs is achieved by rotating the domain of SPCBFs. We run-time approximate the BRDF and visibility coefficients using the set of SPCBFs for light, possibly rotated, through fast lookup of summed-area-table (SAT) and visibility distance table (VDT), respectively. SPCBFs enable new effects such as object rotation in all-frequency rendering of dynamic scenes and on-the-fly BRDF editing under rotating environment lighting. 
With graphics hardware acceleration, our method achieves real-time frame rates.", "title": "Spherical Piecewise Constant Basis Functions for All-Frequency Precomputed Radiance Transfer", "normalizedTitle": "Spherical Piecewise Constant Basis Functions for All-Frequency Precomputed Radiance Transfer", "fno": "ttg2008020454", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Graphics", "Color", "Shading", "Shadowing", "And Texture", "Real Time Rendering", "Precomputed Radiance Transfer", "Spherical Piecewise Constant Basis Functions" ], "authors": [ { "givenName": "Kun", "surname": "Xu", "fullName": "Kun Xu", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Yun-Tao", "surname": "Jia", "fullName": "Yun-Tao Jia", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Hongbo", "surname": "Fu", "fullName": "Hongbo Fu", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Shimin", "surname": "Hu", "fullName": "Shimin Hu", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Chiew-Lan", "surname": "Tai", "fullName": "Chiew-Lan Tai", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2008-03-01 00:00:00", "pubType": "trans", "pages": "454-467", "year": "2008", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cgiv/2004/2178/0/21780101", "title": "Precomputed Radiance Transfer with Spatially-Varying Lighting Effects", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2004/21780101/12OmNBQkx3g", "parentPublication": { "id": "proceedings/cgiv/2004/2178/0", "title": "Proceedings. International Conference on Computer Graphics, Imaging and Visualization, 2004. 
CGIV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2007/3009/0/30090161", "title": "Precomputed Visibility Cuts for Interactive Relighting with Dynamic BRDFs", "doi": null, "abstractUrl": "/proceedings-article/pg/2007/30090161/12OmNqFa5pJ", "parentPublication": { "id": "proceedings/pg/2007/3009/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacific-graphics/2010/4205/0/4205a024", "title": "Fast Height-Field Rendering under Image-Based Lighting", "doi": null, "abstractUrl": "/proceedings-article/pacific-graphics/2010/4205a024/12OmNs0C9Uf", "parentPublication": { "id": "proceedings/pacific-graphics/2010/4205/0", "title": "Pacific Conference on Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/1997/8028/0/80280059", "title": "Rendering of spherical light fields", "doi": null, "abstractUrl": "/proceedings-article/pg/1997/80280059/12OmNvTTciE", "parentPublication": { "id": "proceedings/pg/1997/8028/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/02/v0254", "title": "Noise-Resistant Fitting for Spherical Harmonics", "doi": null, "abstractUrl": "/journal/tg/2006/02/v0254/13rRUwhHcQL", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/03/ttg2012030447", "title": "Efficient Visibility Encoding for Dynamic Illumination in Direct Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2012/03/ttg2012030447/13rRUxAATgu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & 
Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/01/ttg2010010043", "title": "All-Frequency Lighting with Multiscale Spherical Radial Basis Functions", "doi": null, "abstractUrl": "/journal/tg/2010/01/ttg2010010043/13rRUxjQybO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/11/ttg2012111811", "title": "Precomputed Safety Shapes for Efficient and Accurate Height-Field Rendering", "doi": null, "abstractUrl": "/journal/tg/2012/11/ttg2012111811/13rRUxjQyvh", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/08/ttg2013081317", "title": "Real-Time Volume Rendering in Dynamic Lighting Environments Using Precomputed Photon Mapping", "doi": null, "abstractUrl": "/journal/tg/2013/08/ttg2013081317/13rRUynHuja", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2003/03/mcg2003030028", "title": "Efficient Light Transport Using Precomputed Visibility", "doi": null, "abstractUrl": "/magazine/cg/2003/03/mcg2003030028/13rRUzpzeHH", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2008020440", "articleId": "13rRUNvgz49", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1g1FWXBrTxe", "doi": "10.1109/TVCG.2019.2961893", "abstract": "We present a machine learning-based approach for detecting and visualizing complex behavior in spatiotemporal volumes. For this, we train models to predict future data values at a given position based on the past values in its neighborhood, capturing common temporal behavior in the data. We then evaluate the model's prediction on the same data. High prediction error means that the local behavior was too complex, unique or uncertain to be accurately captured during training, indicating spatiotemporal regions with interesting behavior. By training several models of varying capacity, we are able to detect spatiotemporal regions of various complexities. We aggregate the obtained prediction errors into a time series or spatial volumes and visualize them together to highlight regions of unpredictable behavior and how they differ between the models. We demonstrate two further volumetric applications: adaptive timestep selection and analysis of ensemble dissimilarity. We apply our technique to datasets from multiple application domains and demonstrate that we are able to produce meaningful results while making minimal assumptions about the underlying data.", "abstracts": [ { "abstractType": "Regular", "content": "We present a machine learning-based approach for detecting and visualizing complex behavior in spatiotemporal volumes. For this, we train models to predict future data values at a given position based on the past values in its neighborhood, capturing common temporal behavior in the data. We then evaluate the model's prediction on the same data. 
High prediction error means that the local behavior was too complex, unique or uncertain to be accurately captured during training, indicating spatiotemporal regions with interesting behavior. By training several models of varying capacity, we are able to detect spatiotemporal regions of various complexities. We aggregate the obtained prediction errors into a time series or spatial volumes and visualize them together to highlight regions of unpredictable behavior and how they differ between the models. We demonstrate two further volumetric applications: adaptive timestep selection and analysis of ensemble dissimilarity. We apply our technique to datasets from multiple application domains and demonstrate that we are able to produce meaningful results while making minimal assumptions about the underlying data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a machine learning-based approach for detecting and visualizing complex behavior in spatiotemporal volumes. For this, we train models to predict future data values at a given position based on the past values in its neighborhood, capturing common temporal behavior in the data. We then evaluate the model's prediction on the same data. High prediction error means that the local behavior was too complex, unique or uncertain to be accurately captured during training, indicating spatiotemporal regions with interesting behavior. By training several models of varying capacity, we are able to detect spatiotemporal regions of various complexities. We aggregate the obtained prediction errors into a time series or spatial volumes and visualize them together to highlight regions of unpredictable behavior and how they differ between the models. We demonstrate two further volumetric applications: adaptive timestep selection and analysis of ensemble dissimilarity. 
We apply our technique to datasets from multiple application domains and demonstrate that we are able to produce meaningful results while making minimal assumptions about the underlying data.", "title": "Local Prediction Models for Spatiotemporal Volume Visualization", "normalizedTitle": "Local Prediction Models for Spatiotemporal Volume Visualization", "fno": "08941308", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Learning Artificial Intelligence", "Spatiotemporal Phenomena", "Time Series", "Local Prediction Models", "Spatiotemporal Volume Visualization", "Machine Learning Based Approach", "Temporal Behavior", "Prediction Error", "Local Behavior", "Prediction Errors", "Time Series", "Spatial Volumes", "Unpredictable Behavior", "Complex Behavior Detection", "Complex Behavior Visualization", "Future Data Values Prediction", "Spatiotemporal Region Detection", "Adaptive Timestep Selection", "Ensemble Dissimilarity Analysis", "Data Visualization", "Predictive Models", "Spatiotemporal Phenomena", "Analytical Models", "Data Models", "Neural Networks", "Training", "Volume Visualization", "Machine Learning", "Neural Nets", "Ensemble Visualization" ], "authors": [ { "givenName": "Gleb", "surname": "Tkachev", "fullName": "Gleb Tkachev", "affiliation": "Visualization Research Center, University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Steffen", "surname": "Frey", "fullName": "Steffen Frey", "affiliation": "Visualization Research Center, University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Thomas", "surname": "Ertl", "fullName": "Thomas Ertl", "affiliation": "Visualization Research Center, University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": 
"3091-3108", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdmw/2013/3142/0/3143a994", "title": "Severe Hail Prediction within a Spatiotemporal Relational Data Mining Framework", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2013/3143a994/12OmNzZWbJ9", "parentPublication": { "id": "proceedings/icdmw/2013/3142/0", "title": "2013 IEEE 13th International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3926", "title": "STRPM: A Spatiotemporal Residual Predictive Model for High-Resolution Video Prediction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3926/1H1mJyi5Fmg", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/09944966", "title": "Traffic Flow Prediction Based on Spatiotemporal Potential Energy Fields", "doi": null, "abstractUrl": "/journal/tk/5555/01/09944966/1IbM9Dh1cuA", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mdm/2019/3363/0/336300a298", "title": "Traffic Congestion Prediction by Spatiotemporal Propagation Patterns", "doi": null, "abstractUrl": "/proceedings-article/mdm/2019/336300a298/1ckrQodp6qk", "parentPublication": { "id": "proceedings/mdm/2019/3363/0", "title": "2019 20th IEEE International Conference on Mobile Data Management (MDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/06/09321130", "title": "Spatiotemporal Co-Attention Recurrent Neural Networks for 
Human-Skeleton Motion Prediction", "doi": null, "abstractUrl": "/journal/tp/2022/06/09321130/1qkwzzV7Zug", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600b442", "title": "FreqST: Exploiting Frequency Information in Spatiotemporal Modeling for Traffic Prediction", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600b442/1r54Bbu9Tji", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600b034", "title": "A Heterogeneous Spatiotemporal Network for Lightning Prediction", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600b034/1r54DwDI8gM", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600b076", "title": "Interpretable Spatiotemporal Deep Learning Model for Traffic Flow Prediction Based on Potential Energy Fields", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600b076/1r54HZaHBNC", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600a392", "title": "Context-Aware Deep Representation Learning for Geo-Spatiotemporal Analysis", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600a392/1r54zzSEPrG", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428231", "title": "STAE: A Spatiotemporal Auto-Encoder for High-Resolution Video Prediction", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428231/1uilMLOlmaA", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "08943144", "articleId": "1g3bi26D34k", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJd6H1ahi", "name": "ttg202107-08941308s1-tvcg-2961893-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08941308s1-tvcg-2961893-mm.zip", "extension": "zip", "size": "33.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1g3bi26D34k", "doi": "10.1109/TVCG.2019.2962404", "abstract": "We present a new visual exploration concept - Progressive Visual Analytics with Safeguards - that helps people manage the uncertainty arising from progressive data exploration. Despite its potential benefits, intermediate knowledge from progressive analytics can be incorrect due to various machine and human factors, such as a sampling bias or misinterpretation of uncertainty. To alleviate this problem, we introduce PVA-Guards, safeguards people can leave on uncertain intermediate knowledge that needs to be verified, and derive seven PVA-Guards based on previous visualization task taxonomies. PVA-Guards provide a means of ensuring the correctness of the conclusion and understanding the reason when intermediate knowledge becomes invalid. We also present ProReveal, a proof-of-concept system designed and developed to integrate the seven safeguards into progressive data exploration. Finally, we report a user study with 14 participants, which shows people voluntarily employed PVA-Guards to safeguard their findings and ProReveal's PVA-Guard view provides an overview of uncertain intermediate knowledge. We believe our new concept can also offer better consistency in progressive data exploration, alleviating people's heterogeneous interpretation of uncertainty.", "abstracts": [ { "abstractType": "Regular", "content": "We present a new visual exploration concept - Progressive Visual Analytics with Safeguards - that helps people manage the uncertainty arising from progressive data exploration. 
Despite its potential benefits, intermediate knowledge from progressive analytics can be incorrect due to various machine and human factors, such as a sampling bias or misinterpretation of uncertainty. To alleviate this problem, we introduce PVA-Guards, safeguards people can leave on uncertain intermediate knowledge that needs to be verified, and derive seven PVA-Guards based on previous visualization task taxonomies. PVA-Guards provide a means of ensuring the correctness of the conclusion and understanding the reason when intermediate knowledge becomes invalid. We also present ProReveal, a proof-of-concept system designed and developed to integrate the seven safeguards into progressive data exploration. Finally, we report a user study with 14 participants, which shows people voluntarily employed PVA-Guards to safeguard their findings and ProReveal's PVA-Guard view provides an overview of uncertain intermediate knowledge. We believe our new concept can also offer better consistency in progressive data exploration, alleviating people's heterogeneous interpretation of uncertainty.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a new visual exploration concept - Progressive Visual Analytics with Safeguards - that helps people manage the uncertainty arising from progressive data exploration. Despite its potential benefits, intermediate knowledge from progressive analytics can be incorrect due to various machine and human factors, such as a sampling bias or misinterpretation of uncertainty. To alleviate this problem, we introduce PVA-Guards, safeguards people can leave on uncertain intermediate knowledge that needs to be verified, and derive seven PVA-Guards based on previous visualization task taxonomies. PVA-Guards provide a means of ensuring the correctness of the conclusion and understanding the reason when intermediate knowledge becomes invalid. 
We also present ProReveal, a proof-of-concept system designed and developed to integrate the seven safeguards into progressive data exploration. Finally, we report a user study with 14 participants, which shows people voluntarily employed PVA-Guards to safeguard their findings and ProReveal's PVA-Guard view provides an overview of uncertain intermediate knowledge. We believe our new concept can also offer better consistency in progressive data exploration, alleviating people's heterogeneous interpretation of uncertainty.", "title": "ProReveal: Progressive Visual Analytics With Safeguards", "normalizedTitle": "ProReveal: Progressive Visual Analytics With Safeguards", "fno": "08943144", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Visualisation", "Human Factors", "Progressive Visual Analytics With Safeguards", "Heterogeneous Interpretation", "PVA Guard", "Proof Of Concept System", "Human Factors", "Pro Reveal", "Visualization Task Taxonomies", "Uncertain Intermediate Knowledge", "Progressive Data Exploration", "Visual Exploration", "Data Visualization", "Uncertainty", "Visual Analytics", "Human Factors", "Computational Modeling", "Bars", "Task Analysis", "Progressive Visual Analytics", "Intermediate Knowledge Representation", "Hypothesis Testing", "Scalability", "Uncertainty" ], "authors": [ { "givenName": "Jaemin", "surname": "Jo", "fullName": "Jaemin Jo", "affiliation": "Department of Computer Science and Engineering, Seoul National University, Seoul, Republic of Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Sehi", "surname": "L’Yi", "fullName": "Sehi L’Yi", "affiliation": "Department of Computer Science and Engineering, Seoul National University, Seoul, Republic of Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Bongshin", "surname": "Lee", "fullName": "Bongshin Lee", "affiliation": "Microsoft Research, Redmond, WA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Jinwook", "surname": "Seo", 
"fullName": "Jinwook Seo", "affiliation": "Department of Computer Science and Engineering, Seoul National University, Seoul, Republic of Korea", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3109-3122", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2012/12/ttg2012122908", "title": "The User Puzzle—Explaining the Interaction with Visual Analytics Systems", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122908/13rRUIIVlcH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/08/mco2013080090", "title": "Bixplorer: Visual Analytics with Biclusters", "doi": null, "abstractUrl": "/magazine/co/2013/08/mco2013080090/13rRUwcAqvs", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/02/mcg2015020016", "title": "Preparing Undergraduates for Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2015/02/mcg2015020016/13rRUxjQyjN", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08019872", "title": "DeepEyes: Progressive Visual Analytics for Designing Deep Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2018/01/08019872/13rRUxlgxTs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/07/07473883", "title": 
"Approximated and User Steerable tSNE for Progressive Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2017/07/07473883/13rRUxly8T1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122139", "title": "An Extensible Framework for Provenance in Human Terrain Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122139/13rRUyfbwqH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06876049", "title": "Progressive Visual Analytics: User-Driven Visual Exploration of In-Progress Analytics", "doi": null, "abstractUrl": "/journal/tg/2014/12/06876049/13rRUyogGAd", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdva/2018/9194/0/08534019", "title": "Multiple Workspaces in Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/bdva/2018/08534019/17D45W9KVIu", "parentPublication": { "id": "proceedings/bdva/2018/9194/0", "title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vds/2022/5721/0/572100a006", "title": "Communication Analysis through Visual Analytics: Current Practices, Challenges, and New Frontiers", "doi": null, "abstractUrl": "/proceedings-article/vds/2022/572100a006/1JezMbpIoX6", "parentPublication": { "id": "proceedings/vds/2022/5721/0", "title": "2022 IEEE Visualization in Data Science (VDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vizsec/2019/3876/0/09161633", "title": "NetCapVis: Web-based Progressive Visual Analytics for Network Packet Captures", "doi": null, "abstractUrl": "/proceedings-article/vizsec/2019/09161633/1m6hHX7VF7y", "parentPublication": { "id": "proceedings/vizsec/2019/3876/0", "title": "2019 IEEE Symposium on Visualization for Cyber Security (VizSec)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08941308", "articleId": "1g1FWXBrTxe", "__typename": "AdjacentArticleType" }, "next": { "fno": "08945399", "articleId": "1gbtOfrk8Mg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJc1fRMiY", "name": "ttg202107-08943144s1-supp1-2962404.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08943144s1-supp1-2962404.mp4", "extension": "mp4", "size": "68.4 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gbtOfrk8Mg", "doi": "10.1109/TVCG.2019.2963015", "abstract": "Rendering participating media is important to the creation of photorealistic images. Participating media has a translucent aspect that comes from light being scattered inside the material. For materials with a small mean-free-path (mfp), multiple scattering effects dominate. Simulating these effects is computationally intensive, as it requires tracking a large number of scattering events inside the material. Existing approaches precompute multiple scattering events inside the material and store the results in a table. During rendering time, this table is used to compute the scattering effects. While these methods are faster than explicit scattering computation, they incur higher storage costs. In this paper, we present a new representation for double and multiple scattering effects that uses a neural network model. The scattering response from all homogeneous participating media is encoded into a neural network in a preprocessing step. At run time, the neural network is then used to predict the double and multiple scattering effects. We demonstrate the effects combined with Virtual Ray Lights (VRL), although our approach can be integrated with other rendering algorithms. Our algorithm is implemented on GPU. Double and multiple scattering effects for the entire participating media space are encoded using only 23.6 KB of memory. Our method achieves 50 ms per frame in typical scenes and provides results almost identical to the reference.", "abstracts": [ { "abstractType": "Regular", "content": "Rendering participating media is important to the creation of photorealistic images. 
Participating media has a translucent aspect that comes from light being scattered inside the material. For materials with a small mean-free-path (mfp), multiple scattering effects dominate. Simulating these effects is computationally intensive, as it requires tracking a large number of scattering events inside the material. Existing approaches precompute multiple scattering events inside the material and store the results in a table. During rendering time, this table is used to compute the scattering effects. While these methods are faster than explicit scattering computation, they incur higher storage costs. In this paper, we present a new representation for double and multiple scattering effects that uses a neural network model. The scattering response from all homogeneous participating media is encoded into a neural network in a preprocessing step. At run time, the neural network is then used to predict the double and multiple scattering effects. We demonstrate the effects combined with Virtual Ray Lights (VRL), although our approach can be integrated with other rendering algorithms. Our algorithm is implemented on GPU. Double and multiple scattering effects for the entire participating media space are encoded using only 23.6 KB of memory. Our method achieves 50 ms per frame in typical scenes and provides results almost identical to the reference.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Rendering participating media is important to the creation of photorealistic images. Participating media has a translucent aspect that comes from light being scattered inside the material. For materials with a small mean-free-path (mfp), multiple scattering effects dominate. Simulating these effects is computationally intensive, as it requires tracking a large number of scattering events inside the material. Existing approaches precompute multiple scattering events inside the material and store the results in a table. 
During rendering time, this table is used to compute the scattering effects. While these methods are faster than explicit scattering computation, they incur higher storage costs. In this paper, we present a new representation for double and multiple scattering effects that uses a neural network model. The scattering response from all homogeneous participating media is encoded into a neural network in a preprocessing step. At run time, the neural network is then used to predict the double and multiple scattering effects. We demonstrate the effects combined with Virtual Ray Lights (VRL), although our approach can be integrated with other rendering algorithms. Our algorithm is implemented on GPU. Double and multiple scattering effects for the entire participating media space are encoded using only 23.6 KB of memory. Our method achieves 50 ms per frame in typical scenes and provides results almost identical to the reference.", "title": "Interactive Simulation of Scattering Effects in Participating Media Using a Neural Network Model", "normalizedTitle": "Interactive Simulation of Scattering Effects in Participating Media Using a Neural Network Model", "fno": "08945399", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Simulation", "Graphics Processing Units", "Interactive Systems", "Neural Nets", "Ray Tracing", "Rendering Computer Graphics", "Scattering Events", "Ray Tracing", "Participating Media Space", "GPU", "VRL", "Virtual Ray Lights", "Storage Costs", "Double Scattering Effects", "Mean Free Path", "Photorealistic Images", "Rendering", "Interactive Simulation", "Homogeneous Participating Media", "Scattering Response", "Neural Network", "Multiple Scattering Effects", "Scattering", "Neural Networks", "Media", "Rendering Computer Graphics", "Photonics", "Computational Modeling", "Graphics Processing Units", "Participating Media", "Multiple Scattering", "Real Time", "Neural Network" ], "authors": [ { "givenName": "Liangsheng", "surname": "Ge", "fullName": 
"Liangsheng Ge", "affiliation": "School of Software, Shandong University, Jinan, Shandong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Beibei", "surname": "Wang", "fullName": "Beibei Wang", "affiliation": "School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, Jiangsu, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lu", "surname": "Wang", "fullName": "Lu Wang", "affiliation": "School of Software, Shandong University, Jinan, Shandong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiangxu", "surname": "Meng", "fullName": "Xiangxu Meng", "affiliation": "School of Software, Shandong University, Jinan, Shandong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Nicolas", "surname": "Holzschuch", "fullName": "Nicolas Holzschuch", "affiliation": "Inria, CNRS, Grenoble INP LJK, University Grenoble Alpes, Grenoble, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3123-3134", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2014/5209/0/5209c095", "title": "3D Acquisition of Occluded Surfaces from Scattering in Participating Media", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209c095/12OmNAlvHRN", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2012/4896/0/4896a396", "title": "Fast Multiple Scattering in Participating Media with Beamlet Decomposition", "doi": null, "abstractUrl": "/proceedings-article/cis/2012/4896a396/12OmNwekjJa", "parentPublication": { "id": 
"proceedings/cis/2012/4896/0", "title": "2012 Eighth International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2013/5067/0/5067a183", "title": "Shape and Reflectance from Scattering in Participating Media", "doi": null, "abstractUrl": "/proceedings-article/3dv/2013/5067a183/12OmNxFsmJt", "parentPublication": { "id": "proceedings/3dv/2013/5067/0", "title": "2013 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvaui/2016/5870/0/5870a049", "title": "Shape Reconstruction of Objects in Participating Media by Combining Photometric Stereo and Optical Thickness", "doi": null, "abstractUrl": "/proceedings-article/cvaui/2016/5870a049/12OmNyUWQXe", "parentPublication": { "id": "proceedings/cvaui/2016/5870/0", "title": "2016 ICPR 2nd Workshop on Computer Vision for Analysis of Underwater Imagery (CVAUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2008/2741/0/04634627", "title": "Interactive volumetric shadows in participating media with single-scattering", "doi": null, "abstractUrl": "/proceedings-article/rt/2008/04634627/12OmNyjtNIF", "parentPublication": { "id": "proceedings/rt/2008/2741/0", "title": "Symposium on Interactive Ray Tracing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/03/mcg2013030066", "title": "Double- and Multiple-Scattering Effects in Translucent Materials", "doi": null, "abstractUrl": "/magazine/cg/2013/03/mcg2013030066/13rRUIJcWfX", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/02/mcg2012020034", "title": "A Parallel Architecture for Interactively Rendering Scattering and Refraction 
Effects", "doi": null, "abstractUrl": "/magazine/cg/2012/02/mcg2012020034/13rRUwgyOfn", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/10/08093692", "title": "Point-Based Rendering for Homogeneous Participating Media with Refractive Boundaries", "doi": null, "abstractUrl": "/journal/tg/2018/10/08093692/13rRUy0qnGq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08600345", "title": "Precomputed Multiple Scattering for Rapid Light Simulation in Participating Media", "doi": null, "abstractUrl": "/journal/tg/2020/07/08600345/17D45Xh13tH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/10/08684333", "title": "Fast Computation of Single Scattering in Participating Media with Refractive Boundaries Using Frequency Analysis", "doi": null, "abstractUrl": "/journal/tg/2020/10/08684333/1keqXrXysr6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08943144", "articleId": "1g3bi26D34k", "__typename": "AdjacentArticleType" }, "next": { "fno": "08945380", "articleId": "1gbtN0iYlji", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJeCio6Ck", "name": "ttg202107-08945399s1-supp1-2963015.wmv", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08945399s1-supp1-2963015.wmv", "extension": "wmv", "size": "14.3 MB", "__typename": "WebExtraType" } ], "articleVideos": 
[] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gbtN0iYlji", "doi": "10.1109/TVCG.2019.2963018", "abstract": "We present a systematic review of visual analytics tools used for the analysis of blockchains-related data. The blockchain concept has recently received considerable attention and spurred applications in a variety of domains. We systematically and quantitatively assessed 76 analytics tools that have been proposed in research as well as online by professionals and blockchain enthusiasts. Our classification of these tools distinguishes (1) target blockchains, (2) blockchain data, (3) target audiences, (4) task domains, and (5) visualization types. Furthermore, we look at which aspects of blockchain data have already been explored and point out areas that deserve more investigation in the future.", "abstracts": [ { "abstractType": "Regular", "content": "We present a systematic review of visual analytics tools used for the analysis of blockchains-related data. The blockchain concept has recently received considerable attention and spurred applications in a variety of domains. We systematically and quantitatively assessed 76 analytics tools that have been proposed in research as well as online by professionals and blockchain enthusiasts. Our classification of these tools distinguishes (1) target blockchains, (2) blockchain data, (3) target audiences, (4) task domains, and (5) visualization types. 
Furthermore, we look at which aspects of blockchain data have already been explored and point out areas that deserve more investigation in the future.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a systematic review of visual analytics tools used for the analysis of blockchains-related data. The blockchain concept has recently received considerable attention and spurred applications in a variety of domains. We systematically and quantitatively assessed 76 analytics tools that have been proposed in research as well as online by professionals and blockchain enthusiasts. Our classification of these tools distinguishes (1) target blockchains, (2) blockchain data, (3) target audiences, (4) task domains, and (5) visualization types. Furthermore, we look at which aspects of blockchain data have already been explored and point out areas that deserve more investigation in the future.", "title": "Visualization of Blockchain Data: A Systematic Review", "normalizedTitle": "Visualization of Blockchain Data: A Systematic Review", "fno": "08945380", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Visualisation", "Blockchain Concept", "Blockchain Enthusiasts", "Visual Analytics Tools", "Blockchains Related Data", "Blockchain", "Bitcoin", "Data Visualization", "Tools", "Peer To Peer Computing", "Smart Contracts", "Blockchain", "Bitcoin", "Ethereum", "Information Visualization", "Visual Analytics", "State Of The Art Survey" ], "authors": [ { "givenName": "Natkamon", "surname": "Tovanich", "fullName": "Natkamon Tovanich", "affiliation": "IRT SystemX, Paris-Saclay, Palaiseau, France", "__typename": "ArticleAuthorType" }, { "givenName": "Nicolas", "surname": "Heulot", "fullName": "Nicolas Heulot", "affiliation": "IRT SystemX, Paris-Saclay, Palaiseau, France", "__typename": "ArticleAuthorType" }, { "givenName": "Jean-Daniel", "surname": "Fekete", "fullName": "Jean-Daniel Fekete", "affiliation": "Inria, Rocquencourt, France", 
"__typename": "ArticleAuthorType" }, { "givenName": "Petra", "surname": "Isenberg", "fullName": "Petra Isenberg", "affiliation": "Inria, Rocquencourt, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3135-3152", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/bigdata-congress/2017/1996/0/08029379", "title": "An Overview of Blockchain Technology: Architecture, Consensus, and Future Trends", "doi": null, "abstractUrl": "/proceedings-article/bigdata-congress/2017/08029379/17D45Wc1IHZ", "parentPublication": { "id": "proceedings/bigdata-congress/2017/1996/0", "title": "2017 IEEE International Congress on Big Data (BigData Congress)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0/08726800", "title": "ReviewChain: Smart Contract Based Review System with Multi-Blockchain Gateway", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2018/08726800/1axfh7nlsJ2", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0", "title": "2018 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0/08726513", "title": "Blockchain Tradeoffs and Challenges for Current and Emerging Applications: Generalization, Fragmentation, Sidechains, and Scalability", "doi": null, "abstractUrl": 
"/proceedings-article/ithings-greencom-cpscom-smartdata/2018/08726513/1axfhOZ2IFy", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0", "title": "2018 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0/08726819", "title": "Astraea: A Decentralized Blockchain Oracle", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2018/08726819/1axfmTpu3Oo", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0", "title": "2018 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccbb/2018/1277/0/08756390", "title": "Blockchain-based Smart Contracts: A Systematic Mapping Study of Academic Research (2018)", "doi": null, "abstractUrl": "/proceedings-article/iccbb/2018/08756390/1bzYmFeaE5W", "parentPublication": { "id": "proceedings/iccbb/2018/1277/0", "title": "2018 International Conference on Cloud Computing, Big Data and Blockchain (ICCBB)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccbb/2018/1277/0/08756455", "title": "Bitcoin Blockchain Transactions Visualization", "doi": null, "abstractUrl": "/proceedings-article/iccbb/2018/08756455/1bzYmpPzDEY", "parentPublication": { "id": "proceedings/iccbb/2018/1277/0", "title": "2018 International Conference on Cloud Computing, Big Data and Blockchain (ICCBB)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/euros&pw/2019/3026/0/302600a367", "title": "Erasing Data from Blockchain Nodes", "doi": null, "abstractUrl": "/proceedings-article/euros&pw/2019/302600a367/1cJ7azLFB0A", "parentPublication": { "id": "proceedings/euros&pw/2019/3026/0", "title": "2019 IEEE European Symposium on Security and Privacy Workshops (EuroS&PW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/sc/2020/02/08871202", "title": "Policy-Driven Blockchain and Its Applications for Transport Systems", "doi": null, "abstractUrl": "/journal/sc/2020/02/08871202/1eaVL0IEc4o", "parentPublication": { "id": "trans/sc", "title": "IEEE Transactions on Services Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2021/04/08883080", "title": "Blockchain Mutability: Challenges and Proposed Solutions", "doi": null, "abstractUrl": "/journal/ec/2021/04/08883080/1epRVXf9Plm", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on Emerging Topics in Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/oj/2020/01/09086439", "title": "XBlock-ETH: Extracting and Exploring Blockchain Data From Ethereum", "doi": null, "abstractUrl": "/journal/oj/2020/01/09086439/1jAchUW3PPi", "parentPublication": { "id": "trans/oj", "title": "IEEE Open Journal of the Computer Society", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08945399", "articleId": "1gbtOfrk8Mg", "__typename": "AdjacentArticleType" }, "next": { "fno": "08947991", "articleId": "1geNIC9zdiU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1geNIC9zdiU", "doi": "10.1109/TVCG.2019.2963651", "abstract": "Developing an algorithm for a visualization prototype often involves the direct comparison of different development stages and design decisions, and even minor modifications may dramatically affect the results. While existing development tools provide visualizations for gaining general insight into performance and structural aspects of the source code, they neglect the central importance of result images unique to graphical algorithms. In this article, we present a novel approach that enables visualization programmers to simultaneously explore the evolution of their algorithm during the development phase together with its corresponding visual outcomes by providing an automatically updating meta visualization. Our interactive system allows for the direct comparison of all development states on both the visual and the source code level, by providing easy to use navigation and comparison tools. The on-the-fly construction of difference images, source code differences, and a visual representation of the source code structure further enhance the user's insight into the states' interconnected changes over time. Our solution is accessible via a web-based interface that provides GPU-accelerated live execution of C++ and GLSL code, as well as supporting a domain-specific programming language for scientific visualization.", "abstracts": [ { "abstractType": "Regular", "content": "Developing an algorithm for a visualization prototype often involves the direct comparison of different development stages and design decisions, and even minor modifications may dramatically affect the results. 
While existing development tools provide visualizations for gaining general insight into performance and structural aspects of the source code, they neglect the central importance of result images unique to graphical algorithms. In this article, we present a novel approach that enables visualization programmers to simultaneously explore the evolution of their algorithm during the development phase together with its corresponding visual outcomes by providing an automatically updating meta visualization. Our interactive system allows for the direct comparison of all development states on both the visual and the source code level, by providing easy to use navigation and comparison tools. The on-the-fly construction of difference images, source code differences, and a visual representation of the source code structure further enhance the user's insight into the states' interconnected changes over time. Our solution is accessible via a web-based interface that provides GPU-accelerated live execution of C++ and GLSL code, as well as supporting a domain-specific programming language for scientific visualization.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Developing an algorithm for a visualization prototype often involves the direct comparison of different development stages and design decisions, and even minor modifications may dramatically affect the results. While existing development tools provide visualizations for gaining general insight into performance and structural aspects of the source code, they neglect the central importance of result images unique to graphical algorithms. In this article, we present a novel approach that enables visualization programmers to simultaneously explore the evolution of their algorithm during the development phase together with its corresponding visual outcomes by providing an automatically updating meta visualization. 
Our interactive system allows for the direct comparison of all development states on both the visual and the source code level, by providing easy to use navigation and comparison tools. The on-the-fly construction of difference images, source code differences, and a visual representation of the source code structure further enhance the user's insight into the states' interconnected changes over time. Our solution is accessible via a web-based interface that provides GPU-accelerated live execution of C++ and GLSL code, as well as supporting a domain-specific programming language for scientific visualization.", "title": "Vis-a-Vis: Visual Exploration of Visualization Source Code Evolution", "normalizedTitle": "Vis-a-Vis: Visual Exploration of Visualization Source Code Evolution", "fno": "08947991", "hasPdf": true, "idPrefix": "tg", "keywords": [ "C Language", "Data Visualisation", "High Level Languages", "Interactive Systems", "Internet", "Software Prototyping", "Source Code Software", "User Interfaces", "Data Visualization", "Software Visualization", "C", "Domain Specific Programming Language", "Web Based Interface", "Interactive System", "GLSL Code", "Source Code Structure", "Visual Representation", "Meta Visualization", "Visualization Programmers", "Development Tools", "Design Decisions", "Visualization Source Code Evolution", "Visual Exploration", "Data Visualization", "Visualization", "Tools", "Software", "Pipelines", "Task Analysis", "History", "Visualization System And Toolkit Design", "User Interfaces", "Integrating Spatial And Non Spatial Data Visualization", "Software Visualization" ], "authors": [ { "givenName": "Fabian", "surname": "Bolte", "fullName": "Fabian Bolte", "affiliation": "Department of Informatics, University of Bergen, Bergen, Norway", "__typename": "ArticleAuthorType" }, { "givenName": "Stefan", "surname": "Bruckner", "fullName": "Stefan Bruckner", "affiliation": "Department of Informatics, University of Bergen, Bergen, Norway", 
"__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3153-3167", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iwsc/2017/6595/0/07880509", "title": "Evolution of code clone ratios throughout development history of open-source C and C++ programs", "doi": null, "abstractUrl": "/proceedings-article/iwsc/2017/07880509/12OmNAWpys7", "parentPublication": { "id": "proceedings/iwsc/2017/6595/0", "title": "2017 IEEE 11th International Workshop on Software Clones (IWSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/saner/2018/4969/0/08330255", "title": "ChangeMacroRecorder: Recording fine-grained textual changes of source code", "doi": null, "abstractUrl": "/proceedings-article/saner/2018/08330255/12OmNCm7BEh", "parentPublication": { "id": "proceedings/saner/2018/4969/0", "title": "2018 IEEE 25th International Conference on Software Analysis, Evolution and Reengineering (SANER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpc/2016/1428/0/07503732", "title": "Embedding programming context into source code", "doi": null, "abstractUrl": "/proceedings-article/icpc/2016/07503732/12OmNvStcNR", "parentPublication": { "id": "proceedings/icpc/2016/1428/0", "title": "2016 IEEE 24th International Conference on Program Comprehension (ICPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2015/7568/0/7568a021", "title": "Visual Analysis of Source Code Similarities", "doi": null, "abstractUrl": "/proceedings-article/iv/2015/7568a021/12OmNyUnEBg", "parentPublication": { "id": "proceedings/iv/2015/7568/0", "title": "2015 19th International 
Conference on Information Visualisation (iV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vissoft/2005/9540/0/01684304", "title": "Interactive Visual Mechanisms for Exploring Source Code Evolution", "doi": null, "abstractUrl": "/proceedings-article/vissoft/2005/01684304/12OmNzcPA59", "parentPublication": { "id": "proceedings/vissoft/2005/9540/0", "title": "2005 3rd IEEE International Workshop on Visualizing Software for Understanding and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsme/2019/3094/0/309400a615", "title": "Enhancing Eye Tracking of Source Code: A Specialized Fixation Filter for Source Code", "doi": null, "abstractUrl": "/proceedings-article/icsme/2019/309400a615/1fHlIz1I68E", "parentPublication": { "id": "proceedings/icsme/2019/3094/0", "title": "2019 IEEE International Conference on Software Maintenance and Evolution (ICSME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vissoft/2020/9914/0/991400a027", "title": "Exploring Developer Preferences for Visualizing External Information Within Source Code Editors", "doi": null, "abstractUrl": "/proceedings-article/vissoft/2020/991400a027/1olHBF331hC", "parentPublication": { "id": "proceedings/vissoft/2020/9914/0", "title": "2020 Working Conference on Software Visualization (VISSOFT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2020/6768/0/676800a809", "title": "Predicting Code Context Models for Software Development Tasks", "doi": null, "abstractUrl": "/proceedings-article/ase/2020/676800a809/1pP3LkTQQ2A", "parentPublication": { "id": "proceedings/ase/2020/6768/0", "title": "2020 35th IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icse-companion/2021/1219/0/121900a135", "title": "A Better Approach to Track the Evolution of Static Code Warnings", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2021/121900a135/1sET6g5a9IQ", "parentPublication": { "id": "proceedings/icse-companion/2021/1219/0/", "title": "2021 IEEE/ACM 43rd International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2021/0296/0/029600b510", "title": "CodeShovel: Constructing Method-Level Source Code Histories", "doi": null, "abstractUrl": "/proceedings-article/icse/2021/029600b510/1sEXpAV28Wk", "parentPublication": { "id": "proceedings/icse/2021/0296/0/", "title": "2021 IEEE/ACM 43rd International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08945380", "articleId": "1gbtN0iYlji", "__typename": "AdjacentArticleType" }, "next": { "fno": "08948010", "articleId": "1geNJ2aq0nu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJ9AwA6UU", "name": "ttg202107-08947991s1-tvcg-2963651-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08947991s1-tvcg-2963651-mm.zip", "extension": "zip", "size": "130 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1geNJ2aq0nu", "doi": "10.1109/TVCG.2019.2963659", "abstract": "Analyzing students' emotions from classroom videos can help both teachers and parents quickly know the engagement of students in class. The availability of high-definition cameras creates opportunities to record class scenes. However, watching videos is time-consuming, and it is challenging to gain a quick overview of the emotion distribution and find abnormal emotions. In this article, we propose EmotionCues, a visual analytics system to easily analyze classroom videos from the perspective of emotion summary and detailed analysis, which integrates emotion recognition algorithms with visualizations. It consists of three coordinated views: a summary view depicting the overall emotions and their dynamic evolution, a character view presenting the detailed emotion status of an individual, and a video view enhancing the video analysis with further details. Considering the possible inaccuracy of emotion recognition, we also explore several factors affecting the emotion analysis, such as face size and occlusion. They provide hints for inferring the possible inaccuracy and the corresponding reasons. Two use cases and interviews with end users and domain experts are conducted to show that the proposed system could be useful and effective for analyzing emotions in the classroom videos.", "abstracts": [ { "abstractType": "Regular", "content": "Analyzing students' emotions from classroom videos can help both teachers and parents quickly know the engagement of students in class. The availability of high-definition cameras creates opportunities to record class scenes. 
However, watching videos is time-consuming, and it is challenging to gain a quick overview of the emotion distribution and find abnormal emotions. In this article, we propose EmotionCues, a visual analytics system to easily analyze classroom videos from the perspective of emotion summary and detailed analysis, which integrates emotion recognition algorithms with visualizations. It consists of three coordinated views: a summary view depicting the overall emotions and their dynamic evolution, a character view presenting the detailed emotion status of an individual, and a video view enhancing the video analysis with further details. Considering the possible inaccuracy of emotion recognition, we also explore several factors affecting the emotion analysis, such as face size and occlusion. They provide hints for inferring the possible inaccuracy and the corresponding reasons. Two use cases and interviews with end users and domain experts are conducted to show that the proposed system could be useful and effective for analyzing emotions in the classroom videos.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Analyzing students' emotions from classroom videos can help both teachers and parents quickly know the engagement of students in class. The availability of high-definition cameras creates opportunities to record class scenes. However, watching videos is time-consuming, and it is challenging to gain a quick overview of the emotion distribution and find abnormal emotions. In this article, we propose EmotionCues, a visual analytics system to easily analyze classroom videos from the perspective of emotion summary and detailed analysis, which integrates emotion recognition algorithms with visualizations. 
It consists of three coordinated views: a summary view depicting the overall emotions and their dynamic evolution, a character view presenting the detailed emotion status of an individual, and a video view enhancing the video analysis with further details. Considering the possible inaccuracy of emotion recognition, we also explore several factors affecting the emotion analysis, such as face size and occlusion. They provide hints for inferring the possible inaccuracy and the corresponding reasons. Two use cases and interviews with end users and domain experts are conducted to show that the proposed system could be useful and effective for analyzing emotions in the classroom videos.", "title": "<italic>EmotionCues</italic>: Emotion-Oriented Visual Summarization of Classroom Videos", "normalizedTitle": "EmotionCues: Emotion-Oriented Visual Summarization of Classroom Videos", "fno": "08948010", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Aided Instruction", "Data Analysis", "Data Visualisation", "Emotion Recognition", "Video Signal Processing", "Emotion Cues", "Emotion Oriented Visual Summarization", "Classroom Videos", "Emotion Distribution", "Visual Analytics System", "Emotion Summary", "Emotion Recognition Algorithms", "Summary View", "Video View", "Video Analysis", "Emotion Analysis", "High Definition Cameras", "Visualizations", "Character View", "Face Size", "Occlusion", "Videos", "Data Visualization", "Visualization", "Emotion Recognition", "Uncertainty", "Face", "Computer Vision", "Emotion", "Classroom Videos", "Visual Summarization", "Visual Analytics" ], "authors": [ { "givenName": "Haipeng", "surname": "Zeng", "fullName": "Haipeng Zeng", "affiliation": "Hong Kong University of Science and Technology, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Xinhuan", "surname": "Shu", "fullName": "Xinhuan Shu", "affiliation": "Hong Kong University of Science and Technology, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": 
"Yanbang", "surname": "Wang", "fullName": "Yanbang Wang", "affiliation": "Hong Kong University of Science and Technology, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Yong", "surname": "Wang", "fullName": "Yong Wang", "affiliation": "Hong Kong University of Science and Technology, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Liguo", "surname": "Zhang", "fullName": "Liguo Zhang", "affiliation": "Harbin Engineering University, Harbin, China", "__typename": "ArticleAuthorType" }, { "givenName": "Ting-Chuen", "surname": "Pong", "fullName": "Ting-Chuen Pong", "affiliation": "Hong Kong University of Science and Technology, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Huamin", "surname": "Qu", "fullName": "Huamin Qu", "affiliation": "Hong Kong University of Science and Technology, Hong Kong", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3168-3181", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icme/2014/4761/0/06890161", "title": "Emotion recognition from users' EEG signals with the help of stimulus VIDEOS", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890161/12OmNx3ZjgI", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2015/02/07029058", "title": "Predicting Mood from Punctual Emotion Annotations on Videos", "doi": null, "abstractUrl": "/journal/ta/2015/02/07029058/13rRUB7a1e8", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/ta/2018/02/07723914", "title": "Heterogeneous Knowledge Transfer in Video Emotion Recognition, Attribution and Summarization", "doi": null, "abstractUrl": "/journal/ta/2018/02/07723914/13rRUwI5Ujh", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcabes/2021/2889/0/288900a108", "title": "Classroom monitoring system based on facial expression recognition", "doi": null, "abstractUrl": "/proceedings-article/dcabes/2021/288900a108/1AqwrYlvDMI", "parentPublication": { "id": "proceedings/dcabes/2021/2889/0", "title": "2021 20th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/assic/2022/6109/0/10088292", "title": "Emotion Recognition From Online Classroom Videos Using Meta Learning", "doi": null, "abstractUrl": "/proceedings-article/assic/2022/10088292/1M4rHyGJO00", "parentPublication": { "id": "proceedings/assic/2022/6109/0", "title": "2022 International Conference on Advancements in Smart, Secure and Intelligent Computing (ASSIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807235", "title": "<italic>EmoCo</italic>: Visual Analysis of Emotion Coherence in Presentation Videos", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807235/1cG6m1AVG6c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09271885", "title": "When and Why Static Images Are More Effective Than Videos", "doi": null, "abstractUrl": "/journal/ta/2023/01/09271885/1p2R2meHvYQ", "parentPublication": { "id": "trans/ta", "title": 
"IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/09397275", "title": "Exploring the contextual factors affecting multimodal emotion recognition in videos", "doi": null, "abstractUrl": "/journal/ta/5555/01/09397275/1sA4NnVEdtm", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2020/1924/0/192400a514", "title": "Happy Emotion Recognition in Videos Via Apex Spotting and Temporal Models", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2020/192400a514/1uHhkZvdaTu", "parentPublication": { "id": "proceedings/wi-iat/2020/1924/0", "title": "2020 IEEE/WIC/ACM International Joint Conference on Web Intelligence and Intelligent Agent Technology (WI-IAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2021/2354/0/235400a184", "title": "Musical Hyperlapse: A Multimodal Approach to Accelerate First-Person Videos", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2021/235400a184/1zurthy1oJ2", "parentPublication": { "id": "proceedings/sibgrapi/2021/2354/0", "title": "2021 34th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08947991", "articleId": "1geNIC9zdiU", "__typename": "AdjacentArticleType" }, "next": { "fno": "08952604", "articleId": "1gqqhpSZ19S", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gqqhpSZ19S", "doi": "10.1109/TVCG.2020.2964758", "abstract": "Hand interaction techniques in virtual reality often exploit visual dominance over proprioception to remap physical hand movements onto different virtual movements. However, when the offset between virtual and physical hands increases, the remapped virtual hand movements are hardly self-attributed, and the users become aware of the remapping. Interestingly, the sense of self-attribution of a body is called the sense of body ownership (SoBO) in the field of psychology, and the realistic the avatar, the stronger is the SoBO. Hence, we hypothesized that realistic avatars (i.e., human hands) can foster self-attribution of the remapped movements better than abstract avatars (i.e., spherical pointers), thus making the remapping less noticeable. In this article, we present an experiment in which participants repeatedly executed reaching movements with their right hand while different amounts of horizontal shifts were applied. We measured the remapping detection thresholds for each combination of shift directions (left or right) and avatar appearances (realistic or abstract). The results show that realistic avatars increased the detection threshold (i.e., lowered sensitivity) by 31.3 percent than the abstract avatars when the leftward shift was applied (i.e., when the hand moved in the direction away from the body-midline). In addition, the proprioceptive drift (i.e., the displacement of self-localization toward an avatar) was larger with realistic avatars for leftward shifts, indicating that visual information was given greater preference during visuo-proprioceptive integration in realistic avatars. 
Our findings quantifiably show that realistic avatars can make remapping less noticeable for larger mismatches between virtual and physical movements and can potentially improve a wide variety of hand-remapping techniques without changing the mapping itself.", "abstracts": [ { "abstractType": "Regular", "content": "Hand interaction techniques in virtual reality often exploit visual dominance over proprioception to remap physical hand movements onto different virtual movements. However, when the offset between virtual and physical hands increases, the remapped virtual hand movements are hardly self-attributed, and the users become aware of the remapping. Interestingly, the sense of self-attribution of a body is called the sense of body ownership (SoBO) in the field of psychology, and the realistic the avatar, the stronger is the SoBO. Hence, we hypothesized that realistic avatars (i.e., human hands) can foster self-attribution of the remapped movements better than abstract avatars (i.e., spherical pointers), thus making the remapping less noticeable. In this article, we present an experiment in which participants repeatedly executed reaching movements with their right hand while different amounts of horizontal shifts were applied. We measured the remapping detection thresholds for each combination of shift directions (left or right) and avatar appearances (realistic or abstract). The results show that realistic avatars increased the detection threshold (i.e., lowered sensitivity) by 31.3 percent than the abstract avatars when the leftward shift was applied (i.e., when the hand moved in the direction away from the body-midline). In addition, the proprioceptive drift (i.e., the displacement of self-localization toward an avatar) was larger with realistic avatars for leftward shifts, indicating that visual information was given greater preference during visuo-proprioceptive integration in realistic avatars. 
Our findings quantifiably show that realistic avatars can make remapping less noticeable for larger mismatches between virtual and physical movements and can potentially improve a wide variety of hand-remapping techniques without changing the mapping itself.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Hand interaction techniques in virtual reality often exploit visual dominance over proprioception to remap physical hand movements onto different virtual movements. However, when the offset between virtual and physical hands increases, the remapped virtual hand movements are hardly self-attributed, and the users become aware of the remapping. Interestingly, the sense of self-attribution of a body is called the sense of body ownership (SoBO) in the field of psychology, and the realistic the avatar, the stronger is the SoBO. Hence, we hypothesized that realistic avatars (i.e., human hands) can foster self-attribution of the remapped movements better than abstract avatars (i.e., spherical pointers), thus making the remapping less noticeable. In this article, we present an experiment in which participants repeatedly executed reaching movements with their right hand while different amounts of horizontal shifts were applied. We measured the remapping detection thresholds for each combination of shift directions (left or right) and avatar appearances (realistic or abstract). The results show that realistic avatars increased the detection threshold (i.e., lowered sensitivity) by 31.3 percent than the abstract avatars when the leftward shift was applied (i.e., when the hand moved in the direction away from the body-midline). In addition, the proprioceptive drift (i.e., the displacement of self-localization toward an avatar) was larger with realistic avatars for leftward shifts, indicating that visual information was given greater preference during visuo-proprioceptive integration in realistic avatars. 
Our findings quantifiably show that realistic avatars can make remapping less noticeable for larger mismatches between virtual and physical movements and can potentially improve a wide variety of hand-remapping techniques without changing the mapping itself.", "title": "Effect of Avatar Appearance on Detection Thresholds for Remapped Hand Movements", "normalizedTitle": "Effect of Avatar Appearance on Detection Thresholds for Remapped Hand Movements", "fno": "08952604", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Avatars", "Human Computer Interaction", "Avatar Appearance", "Detection Threshold", "Remapped Hand Movements", "Hand Interaction Techniques", "Virtual Reality", "Physical Hand Movements", "Virtual Movements", "Virtual Hands", "Remapped Virtual Hand Movements", "Realistic Avatars", "Remapping Detection Thresholds", "Hand Remapping Techniques", "Avatars", "Visualization", "Haptic Interfaces", "Shape", "Psychology", "Sensitivity", "Virtual Reality", "Body Ownership", "3 D Interaction", "Hand Interaction", "Hand Redirection", "Hand Retargeting", "Avatar" ], "authors": [ { "givenName": "Nami", "surname": "Ogawa", "fullName": "Nami Ogawa", "affiliation": "University of Tokyo, Tokyo, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Takuji", "surname": "Narumi", "fullName": "Takuji Narumi", "affiliation": "University of Tokyo and JST PRESTO, Tokyo, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Michitaka", "surname": "Hirose", "fullName": "Michitaka Hirose", "affiliation": "University of Tokyo, Tokyo, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3182-3197", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223377", "title": 
"Avatar embodiment realism and virtual fitness training", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223377/12OmNCcKQFn", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/01/tth2013010106", "title": "Using Postural Synergies to Animate a Low-Dimensional Hand Avatar in Haptic Simulation", "doi": null, "abstractUrl": "/journal/th/2013/01/tth2013010106/13rRUwInv4z", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/04/08260974", "title": "Evaluating Remapped Physical Reach for Hand Interactions with Passive Haptics in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2018/04/08260974/13rRUwkxc5s", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a057", "title": "Visual Fidelity Effects on Expressive Self-avatar in Virtual Reality: First Impressions Matter", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a057/1CJc41zMnFC", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a772", "title": "Embodiment of an Avatar with Unnatural Arm Movements", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798143", "title": "Estimating Detection Thresholds for Desktop-Scale Hand Redirection in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798143/1cJ0GRxSQwM", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798040", "title": "Virtual Hand Realism Affects Object Size Perception in Body-Based Scaling", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798040/1cJ14CI2Jsk", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998371", "title": "The Impact of a Self-Avatar, Hand Collocation, and Hand Proximity on Embodiment and Stroop Interference", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998371/1hrXiia6v9C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089480", "title": "Detection of Scaled Hand Interactions in Virtual Reality: The Effects of Motion Direction and Task Complexity", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089480/1jIx9NUNYPu", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a455", "title": "Correction of Avatar Hand Movements Supports Learning of a Motor Skill", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2021/255600a455/1tuBfJZ11HG", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08948010", "articleId": "1geNJ2aq0nu", "__typename": "AdjacentArticleType" }, "next": { "fno": "08954740", "articleId": "1gs4WiaCzYY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJdM0dMVq", "name": "ttg202107-08952604s1-supp1-2964758.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08952604s1-supp1-2964758.mp4", "extension": "mp4", "size": "105 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gs4WiaCzYY", "doi": "10.1109/TVCG.2020.2965097", "abstract": "This article proposes an approach to content-preserving image stitching with regular boundary constraints, which aims to stitch multiple images to generate a panoramic image with piecewise rectangular boundaries. Existing methods treat image stitching and rectangling as two separate steps, which may result in suboptimal results as the stitching process is not aware of the further warping needs for rectangling. We address these limitations by formulating image stitching with regular boundaries in a unified optimization framework. Starting from the initial stitching result produced by traditional warping-based optimization, we obtain the irregular boundary from the warped meshes by polygon Boolean operations which robustly handle arbitrary mesh compositions. By analyzing the irregular boundary, we construct a piecewise rectangular boundary. Based on this, we further incorporate line and regular boundary preservation constraints into the image stitching framework, and conduct iterative optimizations to obtain an optimal piecewise rectangular boundary. Thus we can make the boundary of the stitching result as close as possible to a rectangle, while reducing unwanted distortions. We further extend our method to video stitching, by integrating the temporal coherence into the optimization. 
Experiments show that our method efficiently produces visually pleasing panoramas with regular boundaries and unnoticeable distortions.", "abstracts": [ { "abstractType": "Regular", "content": "This article proposes an approach to content-preserving image stitching with regular boundary constraints, which aims to stitch multiple images to generate a panoramic image with piecewise rectangular boundaries. Existing methods treat image stitching and rectangling as two separate steps, which may result in suboptimal results as the stitching process is not aware of the further warping needs for rectangling. We address these limitations by formulating image stitching with regular boundaries in a unified optimization framework. Starting from the initial stitching result produced by traditional warping-based optimization, we obtain the irregular boundary from the warped meshes by polygon Boolean operations which robustly handle arbitrary mesh compositions. By analyzing the irregular boundary, we construct a piecewise rectangular boundary. Based on this, we further incorporate line and regular boundary preservation constraints into the image stitching framework, and conduct iterative optimizations to obtain an optimal piecewise rectangular boundary. Thus we can make the boundary of the stitching result as close as possible to a rectangle, while reducing unwanted distortions. We further extend our method to video stitching, by integrating the temporal coherence into the optimization. Experiments show that our method efficiently produces visually pleasing panoramas with regular boundaries and unnoticeable distortions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This article proposes an approach to content-preserving image stitching with regular boundary constraints, which aims to stitch multiple images to generate a panoramic image with piecewise rectangular boundaries. 
Existing methods treat image stitching and rectangling as two separate steps, which may result in suboptimal results as the stitching process is not aware of the further warping needs for rectangling. We address these limitations by formulating image stitching with regular boundaries in a unified optimization framework. Starting from the initial stitching result produced by traditional warping-based optimization, we obtain the irregular boundary from the warped meshes by polygon Boolean operations which robustly handle arbitrary mesh compositions. By analyzing the irregular boundary, we construct a piecewise rectangular boundary. Based on this, we further incorporate line and regular boundary preservation constraints into the image stitching framework, and conduct iterative optimizations to obtain an optimal piecewise rectangular boundary. Thus we can make the boundary of the stitching result as close as possible to a rectangle, while reducing unwanted distortions. We further extend our method to video stitching, by integrating the temporal coherence into the optimization. 
Experiments show that our method efficiently produces visually pleasing panoramas with regular boundaries and unnoticeable distortions.", "title": "Content-Preserving Image Stitching With Piecewise Rectangular Boundary Constraints", "normalizedTitle": "Content-Preserving Image Stitching With Piecewise Rectangular Boundary Constraints", "fno": "08954740", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Geometry", "Feature Extraction", "Image Matching", "Image Resolution", "Image Segmentation", "Iterative Methods", "Optimisation", "Video Signal Processing", "Regular Boundaries", "Unified Optimization Framework", "Initial Stitching Result", "Traditional Warping Based Optimization", "Irregular Boundary", "Regular Boundary Preservation Constraints", "Image Stitching Framework", "Optimal Piecewise Rectangular Boundary", "Video Stitching", "Content Preserving Image Stitching", "Piecewise Rectangular Boundary Constraints", "Regular Boundary Constraints", "Multiple Images", "Panoramic Image", "Rectangling", "Stitching Process", "Cameras", "Distortion", "Optimization", "Visualization", "Streaming Media", "Content Preserving Image Stitching", "Panoramic Image", "Rectangling", "Polygon Boolean Operations", "Piecewise Rectangular Boundary" ], "authors": [ { "givenName": "Yun", "surname": "Zhang", "fullName": "Yun Zhang", "affiliation": "College of Media Engineering, Communication University of Zhejiang, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yu-Kun", "surname": "Lai", "fullName": "Yu-Kun Lai", "affiliation": "School of Computer Science and Informatics, Cardiff University, Cardiff, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Fang-Lue", "surname": "Zhang", "fullName": "Fang-Lue Zhang", "affiliation": "School of Engineering and Computer Science, Victoria University of Wellington, Wellington, New Zealand", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, 
"showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3198-3212", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2014/5118/0/5118d262", "title": "Parallax-Tolerant Image Stitching", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118d262/12OmNCcKQvB", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/uic-atc-scalcom/2014/7646/0/7646a284", "title": "uStitchHub: Stitching Multi-touch Trajectories on Tiled Very Large Tabletops", "doi": null, "abstractUrl": "/proceedings-article/uic-atc-scalcom/2014/7646a284/12OmNrkjVpK", "parentPublication": { "id": "proceedings/uic-atc-scalcom/2014/7646/0", "title": "2014 IEEE 11th Intl Conf on Ubiquitous Intelligence & Computing and 2014 IEEE 11th Intl Conf on Autonomic & Trusted Computing and 2014 IEEE 14th Intl Conf on Scalable Computing and Communications and Its Associated Workshops (UIC-ATC-ScalCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2015/7082/0/07177505", "title": "Multi-objective content preserving warping for image stitching", "doi": null, "abstractUrl": "/proceedings-article/icme/2015/07177505/12OmNvk7JU9", "parentPublication": { "id": "proceedings/icme/2015/7082/0", "title": "2015 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iptc/2010/4196/0/4196a495", "title": "Seamless Image Stitching Using Optimized Boundary Matching for Gradient and Curvature", "doi": null, "abstractUrl": "/proceedings-article/iptc/2010/4196a495/12OmNvrMUgk", 
"parentPublication": { "id": "proceedings/iptc/2010/4196/0", "title": "Intelligence Information Processing and Trusted Computing, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2015/6759/0/07301374", "title": "Video stitching with spatial-temporal content-preserving warping", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2015/07301374/12OmNxecS6g", "parentPublication": { "id": "proceedings/cvprw/2015/6759/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visapp/2014/8133/1/07294872", "title": "Effortless scanning of 3D object models by boundary aligning and stitching", "doi": null, "abstractUrl": "/proceedings-article/visapp/2014/07294872/12OmNylsZPD", "parentPublication": { "id": "proceedings/visapp/2014/8133/1", "title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/glsv/1994/5610/0/00289999", "title": "Estimating the storage requirements of the rectangular and L-shaped corner stitching data structures", "doi": null, "abstractUrl": "/proceedings-article/glsv/1994/00289999/12OmNzZ5ohv", "parentPublication": { "id": "proceedings/glsv/1994/5610/0", "title": "Proceedings of 4th Great Lakes Symposium on VLSI", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icinc/2022/0969/0/096900a209", "title": "A Quadtree Based Piecewise Poisson Seamless Composition Algorithm for Image Stitching", "doi": null, "abstractUrl": "/proceedings-article/icinc/2022/096900a209/1M675hcesvu", "parentPublication": { "id": "proceedings/icinc/2022/0969/0", "title": "2022 International Conference on Informatics, Networking and Computing (ICINC)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2018/8497/0/849700a066", "title": "Research on Cylindrical Panoramic Video Stitching and AR Perspective Observation Algorithm", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2018/849700a066/1a3x60PNpwQ", "parentPublication": { "id": "proceedings/icvrv/2018/8497/0", "title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2019/4752/0/09212912", "title": "Robust Microscope Image Stitching Using Multiple Zooming Levels", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2019/09212912/1nHRQKQeDfO", "parentPublication": { "id": "proceedings/icvrv/2019/4752/0", "title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08952604", "articleId": "1gqqhpSZ19S", "__typename": "AdjacentArticleType" }, "next": { "fno": "08954824", "articleId": "1gs4VkbA9LG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gs4VkbA9LG", "doi": "10.1109/TVCG.2020.2965109", "abstract": "We present VRIA, a Web-based framework for creating Immersive Analytics (IA) experiences in Virtual Reality. VRIA is built upon WebVR, A-Frame, React and D3.js, and offers a visualization creation workflow which enables users, of different levels of expertise, to rapidly develop Immersive Analytics experiences for the Web. The use of these open-standards Web-based technologies allows us to implement VR experiences in a browser and offers strong synergies with popular visualization libraries, through the HTML Document Object Model (DOM). This makes VRIA ubiquitous and platform-independent. Moreover, by using WebVR's progressive enhancement, the experiences VRIA creates are accessible on a plethora of devices. We elaborate on our motivation for focusing on open-standards Web technologies, present the VRIA creation workflow and detail the underlying mechanics of our framework. We also report on techniques and optimizations necessary for implementing Immersive Analytics experiences on the Web, discuss scalability implications of our framework, and present a series of use case applications to demonstrate the various features of VRIA. Finally, we discuss current limitations of our framework, the lessons learned from its development, and outline further extensions.", "abstracts": [ { "abstractType": "Regular", "content": "We present VRIA, a Web-based framework for creating Immersive Analytics (IA) experiences in Virtual Reality. 
VRIA is built upon WebVR, A-Frame, React and D3.js, and offers a visualization creation workflow which enables users, of different levels of expertise, to rapidly develop Immersive Analytics experiences for the Web. The use of these open-standards Web-based technologies allows us to implement VR experiences in a browser and offers strong synergies with popular visualization libraries, through the HTML Document Object Model (DOM). This makes VRIA ubiquitous and platform-independent. Moreover, by using WebVR's progressive enhancement, the experiences VRIA creates are accessible on a plethora of devices. We elaborate on our motivation for focusing on open-standards Web technologies, present the VRIA creation workflow and detail the underlying mechanics of our framework. We also report on techniques and optimizations necessary for implementing Immersive Analytics experiences on the Web, discuss scalability implications of our framework, and present a series of use case applications to demonstrate the various features of VRIA. Finally, we discuss current limitations of our framework, the lessons learned from its development, and outline further extensions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present VRIA, a Web-based framework for creating Immersive Analytics (IA) experiences in Virtual Reality. VRIA is built upon WebVR, A-Frame, React and D3.js, and offers a visualization creation workflow which enables users, of different levels of expertise, to rapidly develop Immersive Analytics experiences for the Web. The use of these open-standards Web-based technologies allows us to implement VR experiences in a browser and offers strong synergies with popular visualization libraries, through the HTML Document Object Model (DOM). This makes VRIA ubiquitous and platform-independent. Moreover, by using WebVR's progressive enhancement, the experiences VRIA creates are accessible on a plethora of devices. 
We elaborate on our motivation for focusing on open-standards Web technologies, present the VRIA creation workflow and detail the underlying mechanics of our framework. We also report on techniques and optimizations necessary for implementing Immersive Analytics experiences on the Web, discuss scalability implications of our framework, and present a series of use case applications to demonstrate the various features of VRIA. Finally, we discuss current limitations of our framework, the lessons learned from its development, and outline further extensions.", "title": "VRIA: A Web-Based Framework for Creating Immersive Analytics Experiences", "normalizedTitle": "VRIA: A Web-Based Framework for Creating Immersive Analytics Experiences", "fno": "08954824", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Hypermedia Markup Languages", "Internet", "Online Front Ends", "Virtual Reality", "VR Experiences", "Popular Visualization Libraries", "HTML Document Object Model", "Web V Rs Progressive Enhancement", "Experiences VRIA", "Open Standards Web Technologies", "Web Based Framework", "Creating Immersive Analytics Experiences", "Visualization Creation Workflow", "Open Standards Web Based Technologies", "Data Visualization", "Tools", "Virtual Reality", "Browsers", "Libraries", "Collaboration", "Three Dimensional Displays", "Immersive Analytics", "Virtual Reality", "Web Technologies" ], "authors": [ { "givenName": "Peter W. S.", "surname": "Butcher", "fullName": "Peter W. S. Butcher", "affiliation": "Department of Computer Science, Chester University, Chester, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Nigel W.", "surname": "John", "fullName": "Nigel W. John", "affiliation": "Department of Computer Science, Chester University, Chester, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Panagiotis D.", "surname": "Ritsos", "fullName": "Panagiotis D. 
Ritsos", "affiliation": "School of Computer Science and Electronic Engineering, Bangor University, Bangor, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3213-3225", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cw/2017/2089/0/2089a142", "title": "Building Immersive Data Visualizations for the Web", "doi": null, "abstractUrl": "/proceedings-article/cw/2017/2089a142/12OmNx6PiB6", "parentPublication": { "id": "proceedings/cw/2017/2089/0", "title": "2017 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdva/2015/7343/0/07314296", "title": "Immersive Analytics", "doi": null, "abstractUrl": "/proceedings-article/bdva/2015/07314296/12OmNzVXNSO", "parentPublication": { "id": "proceedings/bdva/2015/7343/0", "title": "2015 Big Data Visual Analytics (BDVA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446144", "title": "Redirected Scene Rotation for Immersive Movie Experiences", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446144/13bd1fHrlRD", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446124", "title": "Virtual Immersion: Simulating Immersive Experiences in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446124/13bd1fZBGcL", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08500765", "title": "Enhancing Web-based Analytics Applications through Provenance", "doi": null, "abstractUrl": "/journal/tg/2019/01/08500765/17D45WYQJ6B", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2019/03/08698351", "title": "Immersive Analytics", "doi": null, "abstractUrl": "/magazine/cg/2019/03/08698351/19utOsQX9Nm", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csde/2022/5305/0/10089288", "title": "Comparing Customer Behaviours: Immersive Virtual Reality Store Experiences versus Web and Physical Store Experiences", "doi": null, "abstractUrl": "/proceedings-article/csde/2022/10089288/1M7LemRp5cY", "parentPublication": { "id": "proceedings/csde/2022/5305/0", "title": "2022 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797845", "title": "Collaborative Data Analytics Using Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797845/1cJ0IJouHTi", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2019/5686/0/568600a603", "title": "Design and Implementation of a Web-Based Collaborative Authoring Tool for the Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/sitis/2019/568600a603/1j9xDvY2cEg", "parentPublication": { "id": "proceedings/sitis/2019/5686/0", "title": "2019 15th 
International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090546", "title": "[DC] The Immersive Space to Think: Immersive Analytics for Multimedia Data", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090546/1jIxrquhCNO", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08954740", "articleId": "1gs4WiaCzYY", "__typename": "AdjacentArticleType" }, "next": { "fno": "08960398", "articleId": "1gC2pML2yuk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gC2pML2yuk", "doi": "10.1109/TVCG.2020.2967036", "abstract": "In this article, we present a data-driven approach for modeling and animation of 3D necks. Our method is based on a new neck animation model that decomposes the neck animation into local deformation caused by larynx motion and global deformation driven by head poses, facial expressions, and speech. A skinning model is introduced for modeling local deformation and underlying larynx motions, while the global neck deformation caused by each factor is modeled by its corrective blendshape set, respectively. Based on this neck model, we introduce a regression method to drive the larynx motion and neck deformation from speech. Both the neck model and the speech regressor are learned from a dataset of 3D neck animation sequences captured from different identities. Our neck model significantly improves the realism of facial animation and allows users to easily create plausible neck animations from speech and facial expressions. We verify our neck model and demonstrate its advantages in 3D neck tracking and animation.", "abstracts": [ { "abstractType": "Regular", "content": "In this article, we present a data-driven approach for modeling and animation of 3D necks. Our method is based on a new neck animation model that decomposes the neck animation into local deformation caused by larynx motion and global deformation driven by head poses, facial expressions, and speech. A skinning model is introduced for modeling local deformation and underlying larynx motions, while the global neck deformation caused by each factor is modeled by its corrective blendshape set, respectively. 
Based on this neck model, we introduce a regression method to drive the larynx motion and neck deformation from speech. Both the neck model and the speech regressor are learned from a dataset of 3D neck animation sequences captured from different identities. Our neck model significantly improves the realism of facial animation and allows users to easily create plausible neck animations from speech and facial expressions. We verify our neck model and demonstrate its advantages in 3D neck tracking and animation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this article, we present a data-driven approach for modeling and animation of 3D necks. Our method is based on a new neck animation model that decomposes the neck animation into local deformation caused by larynx motion and global deformation driven by head poses, facial expressions, and speech. A skinning model is introduced for modeling local deformation and underlying larynx motions, while the global neck deformation caused by each factor is modeled by its corrective blendshape set, respectively. Based on this neck model, we introduce a regression method to drive the larynx motion and neck deformation from speech. Both the neck model and the speech regressor are learned from a dataset of 3D neck animation sequences captured from different identities. Our neck model significantly improves the realism of facial animation and allows users to easily create plausible neck animations from speech and facial expressions. 
We verify our neck model and demonstrate its advantages in 3D neck tracking and animation.", "title": "Data-Driven 3D Neck Modeling and Animation", "normalizedTitle": "Data-Driven 3D Neck Modeling and Animation", "fno": "08960398", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Animation", "Deformation", "Face Recognition", "Necking", "Regression Analysis", "Underlying Larynx Motions", "Global Neck Deformation", "Neck Model", "Larynx Motion", "Facial Animation", "Plausible Neck Animations", "3 D Neck Tracking", "Data Driven 3 D Neck Modeling", "Data Driven Approach", "Neck Animation Model", "Local Deformation", "Skinning Model", "Neck", "Larynx", "Facial Animation", "Strain", "Solid Modeling", "Three Dimensional Displays", "Neck Modeling", "Neck Animation", "Speech Driven Animation" ], "authors": [ { "givenName": "Yilong", "surname": "Liu", "fullName": "Yilong Liu", "affiliation": "Tsinghua University, Beijing, P. R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Chengwei", "surname": "Zheng", "fullName": "Chengwei Zheng", "affiliation": "Tsinghua University, Beijing, P. R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Feng", "surname": "Xu", "fullName": "Feng Xu", "affiliation": "Tsinghua University, Beijing, P. R. 
China", "__typename": "ArticleAuthorType" }, { "givenName": "Xin", "surname": "Tong", "fullName": "Xin Tong", "affiliation": "Microsoft Research Asia, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Baining", "surname": "Guo", "fullName": "Baining Guo", "affiliation": "Microsoft Research Asia, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3226-3237", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccsee/2012/4647/3/4647c434", "title": "A Survey of Computer Facial Animation Techniques", "doi": null, "abstractUrl": "/proceedings-article/iccsee/2012/4647c434/12OmNAXxXhU", "parentPublication": { "id": "proceedings/iccsee/2012/4647/3", "title": "Computer Science and Electronics Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2014/4717/0/06890554", "title": "Realtime speech-driven facial animation using Gaussian Mixture Models", "doi": null, "abstractUrl": "/proceedings-article/icmew/2014/06890554/12OmNBC8Ayh", "parentPublication": { "id": "proceedings/icmew/2014/4717/0", "title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1996/7588/0/75880098", "title": "Facial Animation", "doi": null, "abstractUrl": "/proceedings-article/ca/1996/75880098/12OmNvT2oR2", "parentPublication": { "id": "proceedings/ca/1996/7588/0", "title": "Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011861", "title": "Animation of generic 3D head models 
driven by speech", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011861/12OmNviZlAw", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733c328", "title": "Speech-Driven 3D Facial Animation with Implicit Emotional Awareness: A Deep Learning Approach", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733c328/12OmNxE2mG1", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2010/4166/0/4166a009", "title": "Expressive MPEG-4 Facial Animation Using Quadratic Deformation Models", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2010/4166a009/12OmNxH9Xgx", "parentPublication": { "id": "proceedings/cgiv/2010/4166/0", "title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2000/0868/0/08680359", "title": "Control of Feature-Point-Driven Facial Animation Using a Hypothetical Face", "doi": null, "abstractUrl": "/proceedings-article/pg/2000/08680359/12OmNyqzLXk", "parentPublication": { "id": "proceedings/pg/2000/0868/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/11/ttg2012111915", "title": "A Statistical Quality Model for Data-Driven Speech Animation", "doi": null, "abstractUrl": "/journal/tg/2012/11/ttg2012111915/13rRUIIVlkf", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798145", "title": "Speech-Driven Facial Animation by LSTM-RNN for Communication Use", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798145/1cJ0YZ9Bfgs", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09524465", "title": "Geometry-Guided Dense Perspective Network for Speech-Driven Facial Animation", "doi": null, "abstractUrl": "/journal/tg/2022/12/09524465/1wpqCsqBU6Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08954824", "articleId": "1gs4VkbA9LG", "__typename": "AdjacentArticleType" }, "next": { "fno": "08966278", "articleId": "1gNEBsadHP2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJjYnEis0", "name": "ttg202107-08960398s1-supp1-2967036.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08960398s1-supp1-2967036.mp4", "extension": "mp4", "size": "192 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gNEBsadHP2", "doi": "10.1109/TVCG.2020.2968062", "abstract": "In this article we introduce a differentiable rendering module which allows neural networks to efficiently process 3D data. The module is composed of continuous piecewise differentiable functions defined as a sensor array of cells embedded in 3D space. Our module is learnable and can be easily integrated into neural networks allowing to optimize data rendering towards specific learning tasks using gradient based methods in an end-to-end fashion. Essentially, the module's sensor cells are allowed to transform independently and locally focus and sense different parts of the 3D data. Thus, through their optimization process, cells learn to focus on important parts of the data, bypassing occlusions, clutter, and noise. Since sensor cells originally lie on a grid, this equals to a highly non-linear rendering of the scene into a 2D image. Our module performs especially well in presence of clutter and occlusions as well as dealing with non-linear deformations to improve classification accuracy through proper rendering of the data. In our experiments, we apply our module in various learning tasks and demonstrate that using our rendering module we accomplish efficient classification, localization, and segmentation tasks on 2D/3D cluttered and non-cluttered data.", "abstracts": [ { "abstractType": "Regular", "content": "In this article we introduce a differentiable rendering module which allows neural networks to efficiently process 3D data. The module is composed of continuous piecewise differentiable functions defined as a sensor array of cells embedded in 3D space. 
Our module is learnable and can be easily integrated into neural networks allowing to optimize data rendering towards specific learning tasks using gradient based methods in an end-to-end fashion. Essentially, the module's sensor cells are allowed to transform independently and locally focus and sense different parts of the 3D data. Thus, through their optimization process, cells learn to focus on important parts of the data, bypassing occlusions, clutter, and noise. Since sensor cells originally lie on a grid, this equals to a highly non-linear rendering of the scene into a 2D image. Our module performs especially well in presence of clutter and occlusions as well as dealing with non-linear deformations to improve classification accuracy through proper rendering of the data. In our experiments, we apply our module in various learning tasks and demonstrate that using our rendering module we accomplish efficient classification, localization, and segmentation tasks on 2D/3D cluttered and non-cluttered data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this article we introduce a differentiable rendering module which allows neural networks to efficiently process 3D data. The module is composed of continuous piecewise differentiable functions defined as a sensor array of cells embedded in 3D space. Our module is learnable and can be easily integrated into neural networks allowing to optimize data rendering towards specific learning tasks using gradient based methods in an end-to-end fashion. Essentially, the module's sensor cells are allowed to transform independently and locally focus and sense different parts of the 3D data. Thus, through their optimization process, cells learn to focus on important parts of the data, bypassing occlusions, clutter, and noise. Since sensor cells originally lie on a grid, this equals to a highly non-linear rendering of the scene into a 2D image. 
Our module performs especially well in presence of clutter and occlusions as well as dealing with non-linear deformations to improve classification accuracy through proper rendering of the data. In our experiments, we apply our module in various learning tasks and demonstrate that using our rendering module we accomplish efficient classification, localization, and segmentation tasks on 2D/3D cluttered and non-cluttered data.", "title": "A Non-Linear Differentiable CNN-Rendering Module for 3D Data Enhancement", "normalizedTitle": "A Non-Linear Differentiable CNN-Rendering Module for 3D Data Enhancement", "fno": "08966278", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Feature Extraction", "Gradient Methods", "Image Segmentation", "Learning Artificial Intelligence", "Neural Nets", "Optimisation", "Pattern Classification", "Radar Clutter", "Rendering Computer Graphics", "Nonlinear Differentiable CNN Rendering Module", "Differentiable Rendering Module", "Neural Networks", "Continuous Piecewise Differentiable Functions", "Sensor Array", "Data Rendering", "Specific Learning Tasks", "Gradient Based Methods", "Sensor Cells", "Optimization Process", "Nonlinear Rendering", "Nonlinear Deformations", "Proper Rendering", "Three Dimensional Displays", "Rendering Computer Graphics", "Shape", "Task Analysis", "Two Dimensional Displays", "Clutter", "Neural Networks", "3 D Convolutional Neural Networks", "Shape Modeling", "Noise Removal" ], "authors": [ { "givenName": "Yonatan", "surname": "Svirsky", "fullName": "Yonatan Svirsky", "affiliation": "Department of Computer Science, Ben-Gurion University of the Negev, Beer-Sheva, Israel", "__typename": "ArticleAuthorType" }, { "givenName": "Andrei", "surname": "Sharf", "fullName": "Andrei Sharf", "affiliation": "Department of Computer Science, Ben-Gurion University of the Negev, Beer-Sheva, Israel", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, 
"issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3238-3249", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/5555/01/09689957", "title": "Efficient Specular Glints Rendering with Differentiable Regularization", "doi": null, "abstractUrl": "/journal/tg/5555/01/09689957/1AlCfIlPhfy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500a628", "title": "Shadow Art Revisited: A Differentiable Rendering Based Approach", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500a628/1B12RNuAqvS", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200g068", "title": "Differentiable Surface Rendering via Non-Differentiable Sampling", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200g068/1BmFpmQFMKA", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1230", "title": "Learning to Regress Bodies from Images using Differentiable Semantic Rendering", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1230/1BmLryCiwjm", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aemcse/2022/8474/0/847400a167", "title": "3D 
Communication System Integrating 3D Reconstruction and Rendering Display", "doi": null, "abstractUrl": "/proceedings-article/aemcse/2022/847400a167/1IlObcruRxK", "parentPublication": { "id": "proceedings/aemcse/2022/8474/0", "title": "2022 5th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h707", "title": "Soft Rasterizer: A Differentiable Renderer for Image-Based 3D Reasoning", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300h707/1hVlfIgUyLm", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/01/09134794", "title": "A General Differentiable Mesh Renderer for Image-Based 3D Reasoning", "doi": null, "abstractUrl": "/journal/tp/2022/01/09134794/1lgLr0OBt9C", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800c016", "title": "DIST: Rendering Deep Implicit Signed Distance Function With Differentiable Sphere Tracing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800c016/1m3nuEI1jJm", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d501", "title": "Differentiable Volumetric Rendering: Learning Implicit 3D Representations Without 3D Supervision", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d501/1m3nwXQXEAw", "parentPublication": { "id": 
"proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a230", "title": "Cycle-Consistent Generative Rendering for 2D-3D Modality Translation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a230/1qyxjJVmLQI", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08960398", "articleId": "1gC2pML2yuk", "__typename": "AdjacentArticleType" }, "next": { "fno": "08964443", "articleId": "1gLZSnCp3Ko", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gLZSnCp3Ko", "doi": "10.1109/TVCG.2020.2968433", "abstract": "We present DeepSketchHair, a deep learning based tool for modeling of 3D hair from 2D sketches. Given a 3D bust model as reference, our sketching system takes as input a user-drawn sketch (consisting of hair contour and a few strokes indicating the hair growing direction within a hair region), and automatically generates a 3D hair model, matching the input sketch. The key enablers of our system are three carefully designed neural networks, namely, S2ONet, which converts an input sketch to a dense 2D hair orientation field; O2VNet, which maps the 2D orientation field to a 3D vector field; and V2VNet, which updates the 3D vector field with respect to the new sketches, enabling hair editing with additional sketches in new views. All the three networks are trained with synthetic data generated from a 3D hairstyle database. We demonstrate the effectiveness and expressiveness of our tool using a variety of hairstyles and also compare our method with prior art.", "abstracts": [ { "abstractType": "Regular", "content": "We present DeepSketchHair, a deep learning based tool for modeling of 3D hair from 2D sketches. Given a 3D bust model as reference, our sketching system takes as input a user-drawn sketch (consisting of hair contour and a few strokes indicating the hair growing direction within a hair region), and automatically generates a 3D hair model, matching the input sketch. 
The key enablers of our system are three carefully designed neural networks, namely, S2ONet, which converts an input sketch to a dense 2D hair orientation field; O2VNet, which maps the 2D orientation field to a 3D vector field; and V2VNet, which updates the 3D vector field with respect to the new sketches, enabling hair editing with additional sketches in new views. All the three networks are trained with synthetic data generated from a 3D hairstyle database. We demonstrate the effectiveness and expressiveness of our tool using a variety of hairstyles and also compare our method with prior art.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present DeepSketchHair, a deep learning based tool for modeling of 3D hair from 2D sketches. Given a 3D bust model as reference, our sketching system takes as input a user-drawn sketch (consisting of hair contour and a few strokes indicating the hair growing direction within a hair region), and automatically generates a 3D hair model, matching the input sketch. The key enablers of our system are three carefully designed neural networks, namely, S2ONet, which converts an input sketch to a dense 2D hair orientation field; O2VNet, which maps the 2D orientation field to a 3D vector field; and V2VNet, which updates the 3D vector field with respect to the new sketches, enabling hair editing with additional sketches in new views. All the three networks are trained with synthetic data generated from a 3D hairstyle database. 
We demonstrate the effectiveness and expressiveness of our tool using a variety of hairstyles and also compare our method with prior art.", "title": "DeepSketchHair: Deep Sketch-Based 3D Hair Modeling", "normalizedTitle": "DeepSketchHair: Deep Sketch-Based 3D Hair Modeling", "fno": "08964443", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Geometry", "Computer Animation", "Deep Learning Artificial Intelligence", "Feature Extraction", "Image Colour Analysis", "Neural Nets", "Realistic Images", "Solid Modelling", "Hair Contour", "Hair Growing Direction", "Hair Region", "Neural Networks", "S 2 O Net", "O 2 V Net", "2 D Orientation Field", "3 D Vector Field", "V 2 V Net", "3 D Hairstyle Database", "Deep Sketch Hair", "Deep Learning Based Tool", "3 D Bust Model", "Sketching System", "User Drawn Sketch", "Deep Sketch Based 3 D Hair Modeling", "Hair", "Three Dimensional Displays", "Solid Modeling", "Two Dimensional Displays", "Computational Modeling", "Deep Learning", "Neural Networks", "Sketch Based Hair Modeling", "3 D Volumetric Structure", "Deep Learning", "Generative Adversarial Networks" ], "authors": [ { "givenName": "Yuefan", "surname": "Shen", "fullName": "Yuefan Shen", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Changgeng", "surname": "Zhang", "fullName": "Changgeng Zhang", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hongbo", "surname": "Fu", "fullName": "Hongbo Fu", "affiliation": "School of Creative Media, City University of Hong Kong, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Kun", "surname": "Zhou", "fullName": "Kun Zhou", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Youyi", "surname": "Zheng", "fullName": "Youyi Zheng", "affiliation": "State Key Lab of 
CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3250-3263", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457d615", "title": "Learning Barycentric Representations of 3D Shapes for Sketch-Based 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d615/12OmNB0FxiX", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/06/mcg2017060088", "title": "Sketch-Based Articulated 3D Shape Retrieval", "doi": null, "abstractUrl": "/magazine/cg/2017/06/mcg2017060088/13rRUwfqpG7", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2018/9497/0/949700a311", "title": "Sketch-Based Shape Retrieval via Multi-view Attention and Generalized Similarity", "doi": null, "abstractUrl": "/proceedings-article/icdh/2018/949700a311/17D45VObpQZ", "parentPublication": { "id": "proceedings/icdh/2018/9497/0", "title": "2018 7th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2018/8497/0/849700a008", "title": "Data-Driven Hair Modeling from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2018/849700a008/1a3x7jLsFPi", "parentPublication": { "id": "proceedings/icvrv/2018/8497/0", "title": "2018 International Conference on Virtual Reality and Visualization 
(ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2016/4847/0/07900083", "title": "3D sketch-based 3D model retrieval with convolutional neural network", "doi": null, "abstractUrl": "/proceedings-article/icpr/2016/07900083/1gysq8EnfHi", "parentPublication": { "id": "proceedings/icpr/2016/4847/0", "title": "2016 23rd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/08/09007505", "title": "Sketch Augmentation-Driven Shape Retrieval Learning Framework Based on Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2021/08/09007505/1hJKlMJzueI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nicoint/2020/8771/0/09122356", "title": "Viewpoint Selection for Sketch-based Hairstyle Modeling", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2020/09122356/1kRSfSP7OpO", "parentPublication": { "id": "proceedings/nicoint/2020/8771/0", "title": "2020 Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102925", "title": "Cross-Modal Guidance Network For Sketch-Based 3d Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102925/1kwqTrDSXF6", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2019/4752/0/09212824", "title": "Automatic Hair Modeling from One Image", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2019/09212824/1nHRUrDMgE0", "parentPublication": { "id": 
"proceedings/icvrv/2019/4752/0", "title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a081", "title": "Towards 3D VR-Sketch to 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a081/1qyxlDtR0Ji", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08966278", "articleId": "1gNEBsadHP2", "__typename": "AdjacentArticleType" }, "next": { "fno": "08967163", "articleId": "1gPjyn904OA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gPjyn904OA", "doi": "10.1109/TVCG.2020.2968911", "abstract": "Stress tensor fields play a central role in solid mechanics studies, but their visualization in 3D space remains challenging as the information-dense multi-variate tensor needs to be sampled in 3D space while avoiding clutter. Taking cues from current tensor visualizations, we adapted glyph-based visualization for stress tensors in 3D space. We also developed a testing framework and performed user studies to evaluate the various glyph-based tensor visualizations for objective accuracy measures, and subjective user feedback for each visualization method. To represent the stress tensor, we color encoded the original superquadric glyph, and in the user study, we compared it to superquadric glyphs developed for second-order symmetric tensors. We found that color encoding improved the user accuracy measures, while the users also rated our method the highest. We compared our method of placing stress tensor glyphs on displacement streamlines to the glyph placement on a 3D grid. In the visualization, we modified the glyph to show both the stress tensor and the displacement vector at each sample point. The participants preferred our method of glyph placement on displacement streamlines as it highlighted the underlying continuous structure in the tensor field.", "abstracts": [ { "abstractType": "Regular", "content": "Stress tensor fields play a central role in solid mechanics studies, but their visualization in 3D space remains challenging as the information-dense multi-variate tensor needs to be sampled in 3D space while avoiding clutter. 
Taking cues from current tensor visualizations, we adapted glyph-based visualization for stress tensors in 3D space. We also developed a testing framework and performed user studies to evaluate the various glyph-based tensor visualizations for objective accuracy measures, and subjective user feedback for each visualization method. To represent the stress tensor, we color encoded the original superquadric glyph, and in the user study, we compared it to superquadric glyphs developed for second-order symmetric tensors. We found that color encoding improved the user accuracy measures, while the users also rated our method the highest. We compared our method of placing stress tensor glyphs on displacement streamlines to the glyph placement on a 3D grid. In the visualization, we modified the glyph to show both the stress tensor and the displacement vector at each sample point. The participants preferred our method of glyph placement on displacement streamlines as it highlighted the underlying continuous structure in the tensor field.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Stress tensor fields play a central role in solid mechanics studies, but their visualization in 3D space remains challenging as the information-dense multi-variate tensor needs to be sampled in 3D space while avoiding clutter. Taking cues from current tensor visualizations, we adapted glyph-based visualization for stress tensors in 3D space. We also developed a testing framework and performed user studies to evaluate the various glyph-based tensor visualizations for objective accuracy measures, and subjective user feedback for each visualization method. To represent the stress tensor, we color encoded the original superquadric glyph, and in the user study, we compared it to superquadric glyphs developed for second-order symmetric tensors. We found that color encoding improved the user accuracy measures, while the users also rated our method the highest. 
We compared our method of placing stress tensor glyphs on displacement streamlines to the glyph placement on a 3D grid. In the visualization, we modified the glyph to show both the stress tensor and the displacement vector at each sample point. The participants preferred our method of glyph placement on displacement streamlines as it highlighted the underlying continuous structure in the tensor field.", "title": "Visualization of 3D Stress Tensor Fields Using Superquadric Glyphs on Displacement Streamlines", "normalizedTitle": "Visualization of 3D Stress Tensor Fields Using Superquadric Glyphs on Displacement Streamlines", "fno": "08967163", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Continuum Mechanics", "Data Visualisation", "Materials Science Computing", "Tensors", "3 D Grid", "3 D Stress Tensor Field Visualization", "Glyph Placement", "Stress Tensor Glyphs", "Second Order Symmetric Tensors", "Glyph Based Tensor Visualizations", "Information Dense Multivariate Tensor", "Solid Mechanics", "Displacement Streamlines", "Superquadric Glyphs", "Tensors", "Stress", "Visualization", "Three Dimensional Displays", "Data Visualization", "Clutter", "Solids", "3 D Stress Tensor Field", "Visualization", "Glyph", "Glyph Placement", "Virtual Reality", "User Study" ], "authors": [ { "givenName": "Mohak", "surname": "Patel", "fullName": "Mohak Patel", "affiliation": "Department of Computer Science, Brown University, Providence, RI, USA", "__typename": "ArticleAuthorType" }, { "givenName": "David H.", "surname": "Laidlaw", "fullName": "David H. 
Laidlaw", "affiliation": "Department of Computer Science, Brown University, Providence, RI, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3264-3276", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/scivis/2015/9785/0/07429504", "title": "3D superquadric glyphs for visualizing myocardial motion", "doi": null, "abstractUrl": "/proceedings-article/scivis/2015/07429504/12OmNrIaemh", "parentPublication": { "id": "proceedings/scivis/2015/9785/0", "title": "2015 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660005", "title": "Visualizing Tensor Fields in Geomechanics", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660005/12OmNvpewaO", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880369", "title": "Visualization of Salt-Induced Stress Perturbations", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880369/12OmNvqEvJq", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532771", "title": "Exploring 2D tensor fields using stress nets", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532771/12OmNzmtWye", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1197", "title": "Superellipsoid-based, Real Symmetric Traceless Tensor Glyphs Motivated by Nematic Liquid Crystal Alignment Visualization", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1197/13rRUNvgyWd", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192722", "title": "Glyph-Based Comparative Visualization for Diffusion Tensor Fields", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192722/13rRUx0gefn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010061595", "title": "Superquadric Glyphs for Symmetric Second-Order Tensors", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010061595/13rRUxZzAhA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/08/ttg2013081331", "title": "Representing Flow Patterns by Using Streamlines with Glyphs", "doi": null, "abstractUrl": "/journal/tg/2013/08/ttg2013081331/13rRUxly9dT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/05/v0508", "title": "Visualization of Geologic Stress Perturbations Using Mohr Diagrams", "doi": null, "abstractUrl": "/journal/tg/2005/05/v0508/13rRUyeTVhT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09067088", 
"title": "AgentVis: Visual Analysis of Agent Behavior With Hierarchical Glyphs", "doi": null, "abstractUrl": "/journal/tg/2021/09/09067088/1j1lyTz50k0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08964443", "articleId": "1gLZSnCp3Ko", "__typename": "AdjacentArticleType" }, "next": { "fno": "08967011", "articleId": "1gPjyDVBxF6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJbbdSB1K", "name": "ttg202107-08967163s1-supp2-2968911.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08967163s1-supp2-2968911.mp4", "extension": "mp4", "size": "63.3 MB", "__typename": "WebExtraType" }, { "id": "1tWJavkCURq", "name": "ttg202107-08967163s1-supp1-2968911.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08967163s1-supp1-2968911.pdf", "extension": "pdf", "size": "84.7 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gPjyDVBxF6", "doi": "10.1109/TVCG.2020.2969181", "abstract": "Advances in Virtual Reality technology have enabled physical walking in virtual environments. While most Virtual Reality systems render stereoscopic images to users, the implication of binocular viewing with respect to the performance of human walking in virtual environments remains largely unknown. In the present study, we conducted two walking experiments in virtual environments using a linear treadmill and a novel projected display known as the Wide Immersive Stereo Environment (WISE) to study the role of binocular viewing in virtual locomotion. The first experiment investigated the walking performance of people stepping over obstacles while the second experiment focused on a scenario on stepping over gaps. Both experiments were conducted under both stereoscopic viewing and non-stereoscopic viewing conditions. By analysing the gait parameters, we found that binocular viewing helped people to make more accurate movements to step over obstacles and gaps in virtual locomotion.", "abstracts": [ { "abstractType": "Regular", "content": "Advances in Virtual Reality technology have enabled physical walking in virtual environments. While most Virtual Reality systems render stereoscopic images to users, the implication of binocular viewing with respect to the performance of human walking in virtual environments remains largely unknown. In the present study, we conducted two walking experiments in virtual environments using a linear treadmill and a novel projected display known as the Wide Immersive Stereo Environment (WISE) to study the role of binocular viewing in virtual locomotion. 
The first experiment investigated the walking performance of people stepping over obstacles while the second experiment focused on a scenario on stepping over gaps. Both experiments were conducted under both stereoscopic viewing and non-stereoscopic viewing conditions. By analysing the gait parameters, we found that binocular viewing helped people to make more accurate movements to step over obstacles and gaps in virtual locomotion.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Advances in Virtual Reality technology have enabled physical walking in virtual environments. While most Virtual Reality systems render stereoscopic images to users, the implication of binocular viewing with respect to the performance of human walking in virtual environments remains largely unknown. In the present study, we conducted two walking experiments in virtual environments using a linear treadmill and a novel projected display known as the Wide Immersive Stereo Environment (WISE) to study the role of binocular viewing in virtual locomotion. The first experiment investigated the walking performance of people stepping over obstacles while the second experiment focused on a scenario on stepping over gaps. Both experiments were conducted under both stereoscopic viewing and non-stereoscopic viewing conditions. 
By analysing the gait parameters, we found that binocular viewing helped people to make more accurate movements to step over obstacles and gaps in virtual locomotion.", "title": "The Role of Binocular Vision in Avoiding Virtual Obstacles While Walking", "normalizedTitle": "The Role of Binocular Vision in Avoiding Virtual Obstacles While Walking", "fno": "08967011", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Rendering Computer Graphics", "Stereo Image Processing", "Three Dimensional Displays", "Virtual Reality", "Virtual Environments", "Stereoscopic Images", "Binocular Viewing", "Human Walking", "Walking Experiments", "Virtual Locomotion", "Walking Performance", "Stereoscopic Viewing", "Nonstereoscopic Viewing Conditions", "Binocular Vision", "Virtual Obstacles", "Virtual Reality Technology", "Wide Immersive Stereo Environment", "Legged Locomotion", "Stereo Image Processing", "Foot", "Virtual Environments", "Tracking", "Rendering Computer Graphics", "Magnetic Heads", "Stereopsis", "Virtual Locomotion", "Virtual Environments" ], "authors": [ { "givenName": "Jingbo", "surname": "Zhao", "fullName": "Jingbo Zhao", "affiliation": "College of Information and Electrical Engineering, China Agricultural University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Robert S.", "surname": "Allison", "fullName": "Robert S. 
Allison", "affiliation": "Department of Electrical Engineering and Computer Science, York University, Toronto, ON, Canada", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3277-3288", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223431", "title": "Walking recording and experience system by Visual Psychophysics Lab", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223431/12OmNB1NVNQ", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2012/1611/0/06238904", "title": "The measurement of eyestrain caused from diverse binocular disparities, viewing time and display sizes in watching stereoscopic 3D content", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06238904/12OmNqJHFuT", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446479", "title": "Adopting the Roll Manipulation for Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446479/13bd1eSlys4", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08448288", "title": "Experiencing an Invisible World War I Battlefield Through Narrative-Driven Redirected Walking in Virtual Reality", 
"doi": null, "abstractUrl": "/proceedings-article/vr/2018/08448288/13bd1fZBGdu", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446216", "title": "I Can See on My Feet While Walking: Sensitivity to Translation Gains with Visible Feet", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446216/13bd1gJ1v0k", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sive/2018/5713/0/08577177", "title": "Influence of hearing your steps and environmental sounds in VR while walking", "doi": null, "abstractUrl": "/proceedings-article/sive/2018/08577177/17D45XoXP3w", "parentPublication": { "id": "proceedings/sive/2018/5713/0", "title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/culture-and-computing/2017/1135/0/08227335", "title": "Walk through a Museum with Binocular Stereo Effect and Spherical Panorama Views", "doi": null, "abstractUrl": "/proceedings-article/culture-and-computing/2017/08227335/17D45XtvpdY", "parentPublication": { "id": "proceedings/culture-and-computing/2017/1135/0", "title": "2017 International Conference on Culture and Computing (Culture and Computing)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797751", "title": "Improving Walking in Place Methods with Individualization and Deep Networks", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797751/1cJ0WSuJ27e", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE 
Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798345", "title": "Investigation of Visual Self-Representation for a Walking-in-Place Navigation System in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798345/1cJ1hpkUgHS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090453", "title": "Perception of Walking Self-body Avatar Enhances Virtual-walking Sensation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090453/1jIxoojmMy4", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08967163", "articleId": "1gPjyn904OA", "__typename": "AdjacentArticleType" }, "next": { "fno": "08967166", "articleId": "1gPjyNWFSgg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gPjyNWFSgg", "doi": "10.1109/TVCG.2020.2969185", "abstract": "Adversarial examples, generated by adding small but intentionally imperceptible perturbations to normal examples, can mislead deep neural networks (DNNs) to make incorrect predictions. Although much work has been done on both adversarial attack and defense, a fine-grained understanding of adversarial examples is still lacking. To address this issue, we present a visual analysis method to explain why adversarial examples are misclassified. The key is to compare and analyze the datapaths of both the adversarial and normal examples. A datapath is a group of critical neurons along with their connections. We formulate the datapath extraction as a subset selection problem and solve it by constructing and training a neural network. A multi-level visualization consisting of a network-level visualization of data flows, a layer-level visualization of feature maps, and a neuron-level visualization of learned features, has been designed to help investigate how datapaths of adversarial and normal examples diverge and merge in the prediction process. A quantitative evaluation and a case study were conducted to demonstrate the promise of our method to explain the misclassification of adversarial examples.", "abstracts": [ { "abstractType": "Regular", "content": "Adversarial examples, generated by adding small but intentionally imperceptible perturbations to normal examples, can mislead deep neural networks (DNNs) to make incorrect predictions. Although much work has been done on both adversarial attack and defense, a fine-grained understanding of adversarial examples is still lacking. 
To address this issue, we present a visual analysis method to explain why adversarial examples are misclassified. The key is to compare and analyze the datapaths of both the adversarial and normal examples. A datapath is a group of critical neurons along with their connections. We formulate the datapath extraction as a subset selection problem and solve it by constructing and training a neural network. A multi-level visualization consisting of a network-level visualization of data flows, a layer-level visualization of feature maps, and a neuron-level visualization of learned features, has been designed to help investigate how datapaths of adversarial and normal examples diverge and merge in the prediction process. A quantitative evaluation and a case study were conducted to demonstrate the promise of our method to explain the misclassification of adversarial examples.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Adversarial examples, generated by adding small but intentionally imperceptible perturbations to normal examples, can mislead deep neural networks (DNNs) to make incorrect predictions. Although much work has been done on both adversarial attack and defense, a fine-grained understanding of adversarial examples is still lacking. To address this issue, we present a visual analysis method to explain why adversarial examples are misclassified. The key is to compare and analyze the datapaths of both the adversarial and normal examples. A datapath is a group of critical neurons along with their connections. We formulate the datapath extraction as a subset selection problem and solve it by constructing and training a neural network. A multi-level visualization consisting of a network-level visualization of data flows, a layer-level visualization of feature maps, and a neuron-level visualization of learned features, has been designed to help investigate how datapaths of adversarial and normal examples diverge and merge in the prediction process. 
A quantitative evaluation and a case study were conducted to demonstrate the promise of our method to explain the misclassification of adversarial examples.", "title": "Analyzing the Noise Robustness of Deep Neural Networks", "normalizedTitle": "Analyzing the Noise Robustness of Deep Neural Networks", "fno": "08967166", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Deep Learning Artificial Intelligence", "Network Level Visualization", "Layer Level Visualization", "Neuron Level Visualization", "Datapath", "Normal Examples", "Adversarial Examples", "Deep Neural Networks", "Adversarial Attack", "Visual Analysis Method", "Neural Network", "Multilevel Visualization", "Neurons", "Visualization", "Data Visualization", "Feature Extraction", "Training", "Merging", "Biological Neural Networks", "Robustness", "Deep Neural Networks", "Adversarial Examples", "Explainable Machine Learning" ], "authors": [ { "givenName": "Kelei", "surname": "Cao", "fullName": "Kelei Cao", "affiliation": "School of Software, BNRist, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Mengchen", "surname": "Liu", "fullName": "Mengchen Liu", "affiliation": "Microsoft, Redmond, WA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Hang", "surname": "Su", "fullName": "Hang Su", "affiliation": "Department of Computer Science and Technology, Institute for AI, THBI Lab, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jing", "surname": "Wu", "fullName": "Jing Wu", "affiliation": "Cardiff University, Cardiff, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Jun", "surname": "Zhu", "fullName": "Jun Zhu", "affiliation": "Department of Computer Science and Technology, Institute for AI, THBI Lab, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Shixia", "surname": "Liu", "fullName": "Shixia Liu", "affiliation": "School of Software, 
BNRist, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3289-3304", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vizsec/2017/2693/0/08062202", "title": "Adversarial-Playground: A visualization suite showing how adversarial examples fool deep learning", "doi": null, "abstractUrl": "/proceedings-article/vizsec/2017/08062202/12OmNBKmXmJ", "parentPublication": { "id": "proceedings/vizsec/2017/2693/0", "title": "2017 IEEE Symposium on Visualization for Cyber Security (VizSec)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmla/2016/6167/0/07838124", "title": "Assessing Threat of Adversarial Examples on Deep Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icmla/2016/07838124/12OmNqHItuw", "parentPublication": { "id": "proceedings/icmla/2016/6167/0", "title": "2016 15th IEEE International Conference on Machine Learning and Applications (ICMLA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sp/2017/5533/0/07958570", "title": "Towards Evaluating the Robustness of Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/sp/2017/07958570/12OmNviHK8t", "parentPublication": { "id": "proceedings/sp/2017/5533/0", "title": "2017 IEEE Symposium on Security and Privacy (SP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2018/1424/0/142401a180", "title": "Visualizing Deep Neural Networks for Text Analytics", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2018/142401a180/12OmNwDACu7", "parentPublication": { "id": 
"proceedings/pacificvis/2018/1424/0", "title": "2018 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200h476", "title": "Towards Robustness of Deep Neural Networks via Regularization", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200h476/1BmIAJt1ieI", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aitest/2019/0492/0/049200a089", "title": "Behavior Pattern-Driven Test Case Selection for Deep Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/aitest/2019/049200a089/1aIROMz9QpW", "parentPublication": { "id": "proceedings/aitest/2019/0492/0", "title": "2019 IEEE International Conference On Artificial Intelligence Testing (AITest)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2018/6861/0/08802509", "title": "Analyzing the Noise Robustness of Deep Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/vast/2018/08802509/1cJ6WWAb0wo", "parentPublication": { "id": "proceedings/vast/2018/6861/0", "title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/spw/2019/3508/0/350800a001", "title": "On the Robustness of Deep K-Nearest Neighbors", "doi": null, "abstractUrl": "/proceedings-article/spw/2019/350800a001/1dx8yAChWCc", "parentPublication": { "id": "proceedings/spw/2019/3508/0", "title": "2019 IEEE Security and Privacy Workshops (SPW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093609", "title": "Multi-way Encoding for Robustness", "doi": null, "abstractUrl": 
"/proceedings-article/wacv/2020/09093609/1jPbs4meEH6", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2020/7121/0/712100a714", "title": "ReluDiff: Differential Verification of Deep Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icse/2020/712100a714/1pK5gC8SLzW", "parentPublication": { "id": "proceedings/icse/2020/7121/0", "title": "2020 IEEE/ACM 42nd International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08967011", "articleId": "1gPjyDVBxF6", "__typename": "AdjacentArticleType" }, "next": { "fno": "08974422", "articleId": "1gZgXizpprG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJgEtWdGw", "name": "ttg202107-08967166s1-supp1-2969185.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08967166s1-supp1-2969185.mp4", "extension": "mp4", "size": "49.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gZgXizpprG", "doi": "10.1109/TVCG.2020.2970045", "abstract": "We present prominent structures in video, a representation of visually strong, spatially sparse and temporally stable structural units, for use in video analysis and editing. With a novel quality measurement of prominent structures in video, we develop a general framework for prominent structure computation, and an efficient hierarchical structure alignment algorithm between a pair of videos. The prominent structural unit map is proposed to encode both binary prominence guidance and numerical strength and geometry details for each video frame. Even though the detailed appearance of videos could be visually different, the proposed alignment algorithm can find matched prominent structure sub-volumes. Prominent structures in video support a wide range of video analysis and editing applications including graphic match-cut between successive videos, instant cut editing, finding transition portals from a video collection, structure-aware video re-ranking, visualizing human action differences, etc.", "abstracts": [ { "abstractType": "Regular", "content": "We present prominent structures in video, a representation of visually strong, spatially sparse and temporally stable structural units, for use in video analysis and editing. With a novel quality measurement of prominent structures in video, we develop a general framework for prominent structure computation, and an efficient hierarchical structure alignment algorithm between a pair of videos. The prominent structural unit map is proposed to encode both binary prominence guidance and numerical strength and geometry details for each video frame. 
Even though the detailed appearance of videos could be visually different, the proposed alignment algorithm can find matched prominent structure sub-volumes. Prominent structures in video support a wide range of video analysis and editing applications including graphic match-cut between successive videos, instant cut editing, finding transition portals from a video collection, structure-aware video re-ranking, visualizing human action differences, etc.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present prominent structures in video, a representation of visually strong, spatially sparse and temporally stable structural units, for use in video analysis and editing. With a novel quality measurement of prominent structures in video, we develop a general framework for prominent structure computation, and an efficient hierarchical structure alignment algorithm between a pair of videos. The prominent structural unit map is proposed to encode both binary prominence guidance and numerical strength and geometry details for each video frame. Even though the detailed appearance of videos could be visually different, the proposed alignment algorithm can find matched prominent structure sub-volumes. 
Prominent structures in video support a wide range of video analysis and editing applications including graphic match-cut between successive videos, instant cut editing, finding transition portals from a video collection, structure-aware video re-ranking, visualizing human action differences, etc.", "title": "Prominent Structures for Video Analysis and Editing", "normalizedTitle": "Prominent Structures for Video Analysis and Editing", "fno": "08974422", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Graphics", "Image Matching", "Video Signal Processing", "Video Support", "Video Collection", "Structure Aware Video", "Sparse Units", "Temporally Stable Structural Units", "Video Frame", "Hierarchical Structure Alignment Algorithm", "Video Analysis", "Video Editing", "Image Edge Detection", "Visualization", "Saliency Detection", "Shape", "Image Color Analysis", "Cameras", "Video Structure", "Video Analysis", "Video Editing" ], "authors": [ { "givenName": "Miao", "surname": "Wang", "fullName": "Miao Wang", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Research Institute for Frontier Science, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiao-Nan", "surname": "Fang", "fullName": "Xiao-Nan Fang", "affiliation": "BNRist, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Guo-Wei", "surname": "Yang", "fullName": "Guo-Wei Yang", "affiliation": "BNRist, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Ariel", "surname": "Shamir", "fullName": "Ariel Shamir", "affiliation": "Department of Computer Science, Interdisciplinary Center, Herzliya, Israel", "__typename": "ArticleAuthorType" }, { "givenName": "Shi-Min", "surname": "Hu", "fullName": "Shi-Min Hu", "affiliation": "BNRist, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, 
"showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3305-3317", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2010/4109/0/4109d236", "title": "Keyframe-Guided Automatic Non-linear Video Editing", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109d236/12OmNBuL1mn", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2006/0366/0/04036847", "title": "Video and Audio Editing for Mobile Applications", "doi": null, "abstractUrl": "/proceedings-article/icme/2006/04036847/12OmNqOOrJJ", "parentPublication": { "id": "proceedings/icme/2006/0366/0", "title": "2006 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460164", "title": "A structure-based video representation for web video categorization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460164/12OmNvDqsNe", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d262", "title": "Multi-cue Structure Preserving MRF for Unconstrained Video Segmentation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d262/12OmNwlHT0C", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icpr/2002/1695/2/169521031", "title": "Video Editing Support System Based on Video Grammar and Content Analysis", "doi": null, "abstractUrl": "/proceedings-article/icpr/2002/169521031/12OmNyOq4SU", "parentPublication": { "id": "proceedings/icpr/2002/1695/2", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2002/7402/4/05745432", "title": "A generic video analysis and segmentation system", "doi": null, "abstractUrl": "/proceedings-article/icassp/2002/05745432/12OmNzGDsGs", "parentPublication": { "id": "proceedings/icassp/2002/7402/4", "title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2005/9331/0/01521514", "title": "Video quality analysis for an automated video capturing and editing system for conversation scenes", "doi": null, "abstractUrl": "/proceedings-article/icme/2005/01521514/12OmNzYNN7i", "parentPublication": { "id": "proceedings/icme/2005/9331/0", "title": "2005 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2018/5321/0/08499465", "title": "Automatic Generation of Textual Advertisement for Video Advertising", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2018/08499465/17D45Vu1Tzq", "parentPublication": { "id": "proceedings/bigmm/2018/5321/0", "title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2005/2372/2/01467600", "title": "Videoshop: a new framework for spatio-temporal video editing in gradient domain", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2005/01467600/1htC63xnTYA", "parentPublication": { 
"id": "proceedings/cvpr/2005/2372/2", "title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900b701", "title": "Editing like Humans: A Contextual, Multimodal Framework for Automated Video Editing", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900b701/1yXsBnK5gYw", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08967166", "articleId": "1gPjyNWFSgg", "__typename": "AdjacentArticleType" }, "next": { "fno": "08986688", "articleId": "1hed9kswQBW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJllvBJ1C", "name": "ttg202107-08974422s1-supp1-2970045.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08974422s1-supp1-2970045.mp4", "extension": "mp4", "size": "83.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hed9kswQBW", "doi": "10.1109/TVCG.2020.2972357", "abstract": "Multiphase flows exhibit a large realm of complex behaviors such as bubbling, glugging, wetting, and splashing which emerge from air-water and water-solid interactions. Current fluid solvers in graphics have demonstrated remarkable success in reproducing each of these visual effects, but none have offered a model general enough to capture all of them concurrently. In contrast, computational fluid dynamics have developed very general approaches to multiphase flows, typically based on kinetic models. Yet, in both communities, there is dearth of methods that can simulate density ratios and Reynolds numbers required for the type of challenging real-life simulations that movie productions strive to digitally create, such as air-water flows. In this article, we propose a kinetic model of the coupling of the Navier-Stokes equations with a conservative phase-field equation, and provide a series of numerical improvements over existing kinetic-based approaches to offer a general multiphase flow solver. The resulting algorithm is embarrassingly parallel, conservative, far more stable than current solvers even for real-life conditions, and general enough to capture the typical multiphase flow behaviors. Various simulation results are presented, including comparisons to both previous work and real footage, to highlight the advantages of our new method.", "abstracts": [ { "abstractType": "Regular", "content": "Multiphase flows exhibit a large realm of complex behaviors such as bubbling, glugging, wetting, and splashing which emerge from air-water and water-solid interactions. 
Current fluid solvers in graphics have demonstrated remarkable success in reproducing each of these visual effects, but none have offered a model general enough to capture all of them concurrently. In contrast, computational fluid dynamics have developed very general approaches to multiphase flows, typically based on kinetic models. Yet, in both communities, there is dearth of methods that can simulate density ratios and Reynolds numbers required for the type of challenging real-life simulations that movie productions strive to digitally create, such as air-water flows. In this article, we propose a kinetic model of the coupling of the Navier-Stokes equations with a conservative phase-field equation, and provide a series of numerical improvements over existing kinetic-based approaches to offer a general multiphase flow solver. The resulting algorithm is embarrassingly parallel, conservative, far more stable than current solvers even for real-life conditions, and general enough to capture the typical multiphase flow behaviors. Various simulation results are presented, including comparisons to both previous work and real footage, to highlight the advantages of our new method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Multiphase flows exhibit a large realm of complex behaviors such as bubbling, glugging, wetting, and splashing which emerge from air-water and water-solid interactions. Current fluid solvers in graphics have demonstrated remarkable success in reproducing each of these visual effects, but none have offered a model general enough to capture all of them concurrently. In contrast, computational fluid dynamics have developed very general approaches to multiphase flows, typically based on kinetic models. 
Yet, in both communities, there is dearth of methods that can simulate density ratios and Reynolds numbers required for the type of challenging real-life simulations that movie productions strive to digitally create, such as air-water flows. In this article, we propose a kinetic model of the coupling of the Navier-Stokes equations with a conservative phase-field equation, and provide a series of numerical improvements over existing kinetic-based approaches to offer a general multiphase flow solver. The resulting algorithm is embarrassingly parallel, conservative, far more stable than current solvers even for real-life conditions, and general enough to capture the typical multiphase flow behaviors. Various simulation results are presented, including comparisons to both previous work and real footage, to highlight the advantages of our new method.", "title": "Kinetic-Based Multiphase Flow Simulation", "normalizedTitle": "Kinetic-Based Multiphase Flow Simulation", "fno": "08986688", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Bubbles", "Computational Fluid Dynamics", "Flow Simulation", "Lattice Boltzmann Methods", "Multiphase Flow", "Navier Stokes Equations", "Two Phase Flow", "Water", "General Approaches", "Kinetic Model", "Real Life Simulations", "Air Water Flows", "Navier Stokes Equations", "Conservative Phase Field Equation", "General Multiphase Flow", "Current Solvers", "Typical Multiphase Flow Behaviors", "Kinetic Based Multiphase Flow Simulation", "Complex Behaviors", "Water Solid Interactions", "Current Fluid", "Visual Effects", "Computational Fluid Dynamics", "Computational Modeling", "Kinetic Theory", "Mathematical Model", "Atmospheric Modeling", "Numerical Models", "Solid Modeling", "Visualization", "Multiphase Flow", "Kinetic Theory", "Phase Field Lattice Boltzmann Model", "Interface Phenomena" ], "authors": [ { "givenName": "Wei", "surname": "Li", "fullName": "Wei Li", "affiliation": "School of Information Science and Technology, ShanghaiTech 
University, Pudong, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Daoming", "surname": "Liu", "fullName": "Daoming Liu", "affiliation": "School of Information Science and Technology, ShanghaiTech University, Pudong, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Mathieu", "surname": "Desbrun", "fullName": "Mathieu Desbrun", "affiliation": "School of Information Science and Technology, ShanghaiTech University, Pudong, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jin", "surname": "Huang", "fullName": "Jin Huang", "affiliation": "State Key Lab of CAD & CG, College of Computer Science, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaopei", "surname": "Liu", "fullName": "Xiaopei Liu", "affiliation": "School of Information Science and Technology, ShanghaiTech University, Pudong, Shanghai, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3318-3334", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iceet/2009/3819/1/3819a218", "title": "Numerical Simulation of Gas/Solid Flow in a Novel Annular Spouted Bed with Multiple Gas Nozzles", "doi": null, "abstractUrl": "/proceedings-article/iceet/2009/3819a218/12OmNCesr6k", "parentPublication": { "id": "proceedings/iceet/2009/3819/1", "title": "Energy and Environment Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icic/2010/7081/4/05514003", "title": "Numerical Simulation of the Flow Characteristic with Different Geometrical Jet", "doi": null, "abstractUrl": "/proceedings-article/icic/2010/05514003/12OmNqyDjpQ", 
"parentPublication": { "id": "proceedings/icic/2010/7081/4", "title": "2010 Third International Conference on Information and Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iceet/2009/3819/2/3819b613", "title": "Kinetic Modeling of MTBE Degradation by Stabilized Immobilized Beads", "doi": null, "abstractUrl": "/proceedings-article/iceet/2009/3819b613/12OmNxZBSC3", "parentPublication": { "id": "proceedings/iceet/2009/3819/2", "title": "Energy and Environment Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2012/4772/0/4772a626", "title": "Progress in Numerical Simulation of High Entrained Air-Water Two-Phase Flow", "doi": null, "abstractUrl": "/proceedings-article/icdma/2012/4772a626/12OmNyQ7FDv", "parentPublication": { "id": "proceedings/icdma/2012/4772/0", "title": "2012 Third International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcmp-ugc/2009/3946/0/3946a114", "title": "Simulation of Mach 3 Cylinder Flow Using Kinetic and Continuum Solvers", "doi": null, "abstractUrl": "/proceedings-article/hpcmp-ugc/2009/3946a114/12OmNzdoME1", "parentPublication": { "id": "proceedings/hpcmp-ugc/2009/3946/0", "title": "HPCMP Users Group Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/09/08419266", "title": "Continuous-Scale Kinetic Fluid Simulation", "doi": null, "abstractUrl": "/journal/tg/2019/09/08419266/13rRUwgyOjr", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvris/2018/8031/0/803100a075", "title": "Numerical Simulation of Three Dimensional Flow Field in Water Treatment 
Agitator Based on Fluent", "doi": null, "abstractUrl": "/proceedings-article/icvris/2018/803100a075/17D45WaTkjb", "parentPublication": { "id": "proceedings/icvris/2018/8031/0", "title": "2018 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/05/08989968", "title": "Kineticist: Kinetic Sculpture Design Using Multilevel Skeletons", "doi": null, "abstractUrl": "/magazine/cg/2021/05/08989968/1hlpxrvYq8U", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090460", "title": "Robust turbulence simulation for particle-based fluids using the Rankine vortex model", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090460/1jIxpVaNpEQ", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/09/09355005", "title": "GPU Optimization for High-Quality Kinetic Fluid Simulation", "doi": null, "abstractUrl": "/journal/tg/2022/09/09355005/1rgClgs3uj6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08974422", "articleId": "1gZgXizpprG", "__typename": "AdjacentArticleType" }, "next": { "fno": "08994105", "articleId": "1hkQR7Os8SI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJhYYXTB6", "name": "ttg202107-08986688s1-supp1-2972357.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08986688s1-supp1-2972357.mp4", 
"extension": "mp4", "size": "190 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hkQR7Os8SI", "doi": "10.1109/TVCG.2020.2973258", "abstract": "One major cause of performance degradation in predictive models is that the test samples are not well covered by the training data. Such not well-represented samples are called OoD samples. In this article, we propose OoDAnalyzer, a visual analysis approach for interactively identifying OoD samples and explaining them in context. Our approach integrates an ensemble OoD detection method and a grid-based visualization. The detection method is improved from deep ensembles by combining more features with algorithms in the same family. To better analyze and understand the OoD samples in context, we have developed a novel <i>k</i>NN-based grid layout algorithm motivated by Hall's theorem. The algorithm approximates the optimal layout and has O(<i>k</i>N<sup>2</sup>)O(<i>k</i>N<sup>2</sup>) time complexity, faster than the grid layout algorithm with overall best performance but O(N<sup>3</sup>)O(N<sup>3</sup>) time complexity. Quantitative evaluation and case studies were performed on several datasets to demonstrate the effectiveness and usefulness of OoDAnalyzer.", "abstracts": [ { "abstractType": "Regular", "content": "One major cause of performance degradation in predictive models is that the test samples are not well covered by the training data. Such not well-represented samples are called OoD samples. In this article, we propose OoDAnalyzer, a visual analysis approach for interactively identifying OoD samples and explaining them in context. Our approach integrates an ensemble OoD detection method and a grid-based visualization. 
The detection method is improved from deep ensembles by combining more features with algorithms in the same family. To better analyze and understand the OoD samples in context, we have developed a novel <i>k</i>NN-based grid layout algorithm motivated by Hall's theorem. The algorithm approximates the optimal layout and has O(<i>k</i>N<sup>2</sup>)O(<i>k</i>N<sup>2</sup>) time complexity, faster than the grid layout algorithm with overall best performance but O(N<sup>3</sup>)O(N<sup>3</sup>) time complexity. Quantitative evaluation and case studies were performed on several datasets to demonstrate the effectiveness and usefulness of OoDAnalyzer.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "One major cause of performance degradation in predictive models is that the test samples are not well covered by the training data. Such not well-represented samples are called OoD samples. In this article, we propose OoDAnalyzer, a visual analysis approach for interactively identifying OoD samples and explaining them in context. Our approach integrates an ensemble OoD detection method and a grid-based visualization. The detection method is improved from deep ensembles by combining more features with algorithms in the same family. To better analyze and understand the OoD samples in context, we have developed a novel kNN-based grid layout algorithm motivated by Hall's theorem. The algorithm approximates the optimal layout and has O(kN2)O(kN2) time complexity, faster than the grid layout algorithm with overall best performance but O(N3)O(N3) time complexity. 
Quantitative evaluation and case studies were performed on several datasets to demonstrate the effectiveness and usefulness of OoDAnalyzer.", "title": "OoDAnalyzer: Interactive Analysis of Out-of-Distribution Samples", "normalizedTitle": "OoDAnalyzer: Interactive Analysis of Out-of-Distribution Samples", "fno": "08994105", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Complexity", "Data Analysis", "Data Visualisation", "Graph Theory", "Interactive Systems", "Optimisation", "Oo D Analyzer", "Out Of Distribution Samples", "Oo D Samples", "Visual Analysis Approach", "Ensemble Oo D Detection Method", "Grid Based Visualization", "NN Based Grid", "Grid Layout Algorithm", "Training", "Layout", "Visualization", "Dogs", "Feature Extraction", "Approximation Algorithms", "Cats", "Out Of Distribution Detection", "Grid Layout", "Interactive Visualization" ], "authors": [ { "givenName": "Changjian", "surname": "Chen", "fullName": "Changjian Chen", "affiliation": "Department of Computer Science & Technology, BNRist, School of Software, Tsinghua University, Haidian, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jun", "surname": "Yuan", "fullName": "Jun Yuan", "affiliation": "Department of Computer Science & Technology, BNRist, School of Software, Tsinghua University, Haidian, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yafeng", "surname": "Lu", "fullName": "Yafeng Lu", "affiliation": "BloombergL.P, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Yang", "surname": "Liu", "fullName": "Yang Liu", "affiliation": "Microsoft Research Asia, Haidian, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hang", "surname": "Su", "fullName": "Hang Su", "affiliation": "Department of Computer Science & Technology, BNRist, School of Software, Tsinghua University, Haidian, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Songtao", "surname": "Yuan", "fullName": 
"Songtao Yuan", "affiliation": "First Affiliated Hospital of Nanjing Medical University, Nanjing, Jiangsu, China", "__typename": "ArticleAuthorType" }, { "givenName": "Shixia", "surname": "Liu", "fullName": "Shixia Liu", "affiliation": "Department of Computer Science & Technology, BNRist, School of Software, Tsinghua University, Haidian, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3335-3349", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200b133", "title": "CODEs: Chamfer Out-of-Distribution Examples against Overconfidence Issue", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200b133/1BmFaDYHtgA", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200i281", "title": "Semantically Coherent Out-of-Distribution Detection", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200i281/1BmFq1tAb16", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900a163", "title": "RODD: A Self-Supervised Approach for Robust Out-of-Distribution Detection", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900a163/1G56R8MQAwg", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600q6876", "title": "Weakly Supervised Semantic Segmentation using Out-of-Distribution Data", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600q6876/1H1jN1lNZM4", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600c602", "title": "Heatmap-based Out-of-Distribution Detection", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600c602/1L8qqu8Q3OU", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600c643", "title": "Hyperdimensional Feature Fusion for Out-of-Distribution Detection", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600c643/1L8qr7oAN44", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j517", "title": "Unsupervised Out-of-Distribution Detection by Maximum Classifier Discrepancy", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j517/1hVl9mSRtfO", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412612", "title": "Boundary Optimised Samples Training for Detecting Out-of-Distribution Images", "doi": null, "abstractUrl": 
"/proceedings-article/icpr/2021/09412612/1tmiAPeMOOs", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900d285", "title": "Sample-free white-box out-of-distribution detection for deep learning", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900d285/1yXsVNgnUA0", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900j447", "title": "Out-of-Distribution Detection Using Union of 1-Dimensional Subspaces", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900j447/1yeKwz9Z2Uw", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08986688", "articleId": "1hed9kswQBW", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJeZET9ny", "name": "ttg202107-08994105s1-supp1-2973258.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08994105s1-supp1-2973258.pdf", "extension": "pdf", "size": "168 kB", "__typename": "WebExtraType" }, { "id": "1tWJfZTROIE", "name": "ttg202107-08994105s1-supp2-2973258.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08994105s1-supp2-2973258.mp4", "extension": "mp4", "size": "46.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUygBw7e", "doi": "10.1109/TVCG.2016.2551242", "abstract": "Reduced hair models have proven successful for interactively simulating a full head of hair strands, building upon a fundamental assumption that only a small set of guide hairs are needed for explicit simulation, and the rest of the hair move coherently and thus can be interpolated using guide hairs. Unfortunately, hair-solid interactions is a pathological case for traditional reduced hair models, as the motion coherence between hair strands can be arbitrarily broken by interacting with solids. In this paper, we propose an adaptive hair skinning method for interactive hair simulation with hair-solid collisions. We precompute many eligible sets of guide hairs and the corresponding interpolation relationships that are represented using a compact strand-based hair skinning model. At runtime, we simulate only guide hairs; for interpolating every other hair, we adaptively choose its guide hairs, taking into account motion coherence and potential hair-solid collisions. Further, we introduce a two-way collision correction algorithm to allow sparsely sampled guide hairs to resolve collisions with solids that can have small geometric features. Our method enables interactive simulation of more than 150 K hair strands interacting with complex solid objects, using 400 guide hairs. 
We demonstrate the efficiency and robustness of the method with various hairstyles and user-controlled arbitrary hair-solid interactions.", "abstracts": [ { "abstractType": "Regular", "content": "Reduced hair models have proven successful for interactively simulating a full head of hair strands, building upon a fundamental assumption that only a small set of guide hairs are needed for explicit simulation, and the rest of the hair move coherently and thus can be interpolated using guide hairs. Unfortunately, hair-solid interactions is a pathological case for traditional reduced hair models, as the motion coherence between hair strands can be arbitrarily broken by interacting with solids. In this paper, we propose an adaptive hair skinning method for interactive hair simulation with hair-solid collisions. We precompute many eligible sets of guide hairs and the corresponding interpolation relationships that are represented using a compact strand-based hair skinning model. At runtime, we simulate only guide hairs; for interpolating every other hair, we adaptively choose its guide hairs, taking into account motion coherence and potential hair-solid collisions. Further, we introduce a two-way collision correction algorithm to allow sparsely sampled guide hairs to resolve collisions with solids that can have small geometric features. Our method enables interactive simulation of more than 150 K hair strands interacting with complex solid objects, using 400 guide hairs. We demonstrate the efficiency and robustness of the method with various hairstyles and user-controlled arbitrary hair-solid interactions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Reduced hair models have proven successful for interactively simulating a full head of hair strands, building upon a fundamental assumption that only a small set of guide hairs are needed for explicit simulation, and the rest of the hair move coherently and thus can be interpolated using guide hairs. 
Unfortunately, hair-solid interactions is a pathological case for traditional reduced hair models, as the motion coherence between hair strands can be arbitrarily broken by interacting with solids. In this paper, we propose an adaptive hair skinning method for interactive hair simulation with hair-solid collisions. We precompute many eligible sets of guide hairs and the corresponding interpolation relationships that are represented using a compact strand-based hair skinning model. At runtime, we simulate only guide hairs; for interpolating every other hair, we adaptively choose its guide hairs, taking into account motion coherence and potential hair-solid collisions. Further, we introduce a two-way collision correction algorithm to allow sparsely sampled guide hairs to resolve collisions with solids that can have small geometric features. Our method enables interactive simulation of more than 150 K hair strands interacting with complex solid objects, using 400 guide hairs. We demonstrate the efficiency and robustness of the method with various hairstyles and user-controlled arbitrary hair-solid interactions.", "title": "Adaptive Skinning for Interactive Hair-Solid Simulation", "normalizedTitle": "Adaptive Skinning for Interactive Hair-Solid Simulation", "fno": "07448467", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Hair", "Adaptation Models", "Computational Modeling", "Solids", "Runtime", "Interpolation", "Animation", "Hair Simulation", "Interactive Method", "Reduced Model", "Adaptivity", "Collision Correction" ], "authors": [ { "givenName": "Menglei", "surname": "Chai", "fullName": "Menglei Chai", "affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Changxi", "surname": "Zheng", "fullName": "Changxi Zheng", "affiliation": "Department of Computer Science, Columbia University, 616 Schapiro (CEPSR), New York, NY", "__typename": "ArticleAuthorType" }, { "givenName": "Kun", 
"surname": "Zhou", "fullName": "Kun Zhou", "affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1725-1738", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2013/4989/0/4989a265", "title": "Wide-Baseline Hair Capture Using Strand-Based Refinement", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2013/4989a265/12OmNA0MYZM", "parentPublication": { "id": "proceedings/cvpr/2013/4989/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2001/1195/0/11950186", "title": "A Design Tool for the Hierarchical Hair Model", "doi": null, "abstractUrl": "/proceedings-article/iv/2001/11950186/12OmNA2cYzp", "parentPublication": { "id": "proceedings/iv/2001/1195/0", "title": "Proceedings Fifth International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1999/0167/0/01670058", "title": "Visible Volume Buffer for Efficient Hair Expression and Shadow Generation", "doi": null, "abstractUrl": "/proceedings-article/ca/1999/01670058/12OmNCbkQC8", "parentPublication": { "id": "proceedings/ca/1999/0167/0", "title": "Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/simultech/2014/060/0/07095029", "title": "2D hair strands generation based on template matching", "doi": null, "abstractUrl": "/proceedings-article/simultech/2014/07095029/12OmNx5GU7n", "parentPublication": { "id": "proceedings/simultech/2014/060/0", "title": 
"2014 International Conference on Simulation and Modeling Methodologies, Technologies and Applications (SIMULTECH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2009/3963/0/3963a185", "title": "Procedural Hair Generation", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2009/3963a185/12OmNzgwmRo", "parentPublication": { "id": "proceedings/sbgames/2009/3963/0", "title": "2009 VIII Brazilian Symposium on Games and Digital Entertainment", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/03/06910280", "title": "2.5D Cartoon Hair Modeling and Manipulation", "doi": null, "abstractUrl": "/journal/tg/2015/03/06910280/13rRUIJuxpC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2001/03/mcg2001030036", "title": "V-HairStudio: An Interactive Tool for Hair Design", "doi": null, "abstractUrl": "/magazine/cg/2001/03/mcg2001030036/13rRUwInvDe", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/02/v0131", "title": "Real-Time Animation of Complex Hairstyles", "doi": null, "abstractUrl": "/journal/tg/2006/02/v0131/13rRUxZzAhw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/03/08301570", "title": "A Skinned Tetrahedral Mesh for Hair Animation and Hair-Water Interaction", "doi": null, "abstractUrl": "/journal/tg/2019/03/08301570/17D45W9KVHk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2022/04/09220808", "title": "Real-Time Hair Simulation With Neural Interpolation", "doi": null, "abstractUrl": "/journal/tg/2022/04/09220808/1nRLElyFvfG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "07473883", "articleId": "13rRUxly8T1", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRXS", "name": "ttg201707-07448467s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07448467s1.zip", "extension": "zip", "size": "49.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxly8T1", "doi": "10.1109/TVCG.2016.2570755", "abstract": "Progressive Visual Analytics aims at improving the interactivity in existing analytics techniques by means of visualization as well as interaction with intermediate results. One key method for data analysis is dimensionality reduction, for example, to produce 2D embeddings that can be visualized and analyzed efficiently. t-Distributed Stochastic Neighbor Embedding (tSNE) is a well-suited technique for the visualization of high-dimensional data. tSNE can create meaningful intermediate results but suffers from a slow initialization that constrains its application in Progressive Visual Analytics. We introduce a controllable tSNE approximation (A-tSNE), which trades off speed and accuracy, to enable interactive data exploration. We offer real-time visualization techniques, including a density-based solution and a Magic Lens to inspect the degree of approximation. With this feedback, the user can decide on local refinements and steer the approximation level during the analysis. We demonstrate our technique with several datasets, in a real-world research scenario and for the real-time analysis of high-dimensional streams to illustrate its effectiveness for interactive data analysis.", "abstracts": [ { "abstractType": "Regular", "content": "Progressive Visual Analytics aims at improving the interactivity in existing analytics techniques by means of visualization as well as interaction with intermediate results. One key method for data analysis is dimensionality reduction, for example, to produce 2D embeddings that can be visualized and analyzed efficiently. 
t-Distributed Stochastic Neighbor Embedding (tSNE) is a well-suited technique for the visualization of high-dimensional data. tSNE can create meaningful intermediate results but suffers from a slow initialization that constrains its application in Progressive Visual Analytics. We introduce a controllable tSNE approximation (A-tSNE), which trades off speed and accuracy, to enable interactive data exploration. We offer real-time visualization techniques, including a density-based solution and a Magic Lens to inspect the degree of approximation. With this feedback, the user can decide on local refinements and steer the approximation level during the analysis. We demonstrate our technique with several datasets, in a real-world research scenario and for the real-time analysis of high-dimensional streams to illustrate its effectiveness for interactive data analysis.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Progressive Visual Analytics aims at improving the interactivity in existing analytics techniques by means of visualization as well as interaction with intermediate results. One key method for data analysis is dimensionality reduction, for example, to produce 2D embeddings that can be visualized and analyzed efficiently. t-Distributed Stochastic Neighbor Embedding (tSNE) is a well-suited technique for the visualization of high-dimensional data. tSNE can create meaningful intermediate results but suffers from a slow initialization that constrains its application in Progressive Visual Analytics. We introduce a controllable tSNE approximation (A-tSNE), which trades off speed and accuracy, to enable interactive data exploration. We offer real-time visualization techniques, including a density-based solution and a Magic Lens to inspect the degree of approximation. With this feedback, the user can decide on local refinements and steer the approximation level during the analysis. 
We demonstrate our technique with several datasets, in a real-world research scenario and for the real-time analysis of high-dimensional streams to illustrate its effectiveness for interactive data analysis.", "title": "Approximated and User Steerable tSNE for Progressive Visual Analytics", "normalizedTitle": "Approximated and User Steerable tSNE for Progressive Visual Analytics", "fno": "07473883", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Visual Analytics", "Algorithm Design And Analysis", "Approximation Algorithms", "Real Time Systems", "Computational Complexity", "High Dimensional Data", "Dimensionality Reduction", "Progressive Visual Analytics", "Approximate Computation" ], "authors": [ { "givenName": "Nicola", "surname": "Pezzotti", "fullName": "Nicola Pezzotti", "affiliation": "Computer Graphics and Visualization group, Delft University of Technology, Delft, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Boudewijn P. F.", "surname": "Lelieveldt", "fullName": "Boudewijn P. F. 
Lelieveldt", "affiliation": "Division of Image Processing, Department of Radiology, Leiden University Medical Center, Leiden, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Laurens van der", "surname": "Maaten", "fullName": "Laurens van der Maaten", "affiliation": "Pattern Recognition and Bioinformatics group, Delft University of Technology, Delft, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Thomas", "surname": "Höllt", "fullName": "Thomas Höllt", "affiliation": "Computer Graphics and Visualization group, Delft University of Technology, Delft, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Elmar", "surname": "Eisemann", "fullName": "Elmar Eisemann", "affiliation": "Computer Graphics and Visualization group, Delft University of Technology, Delft, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Anna", "surname": "Vilanova", "fullName": "Anna Vilanova", "affiliation": "Computer Graphics and Visualization group, Delft University of Technology, Delft, The Netherlands", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1739-1752", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2017/0831/0/0831a422", "title": "Visual Analytics for Electronic Intelligence: Challenges and Opportunities", "doi": null, "abstractUrl": "/proceedings-article/iv/2017/0831a422/12OmNB7LvBm", "parentPublication": { "id": "proceedings/iv/2017/0831/0", "title": "2017 21st International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892b495", "title": "A Role for Reasoning in Visual Analytics", "doi": null, 
"abstractUrl": "/proceedings-article/hicss/2013/4892b495/12OmNqJ8tq4", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2008/01/mcg2008010018", "title": "An Information-Theoretic View of Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2008/01/mcg2008010018/13rRUB6SpRW", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/08/mco2013080090", "title": "Bixplorer: Visual Analytics with Biclusters", "doi": null, "abstractUrl": "/magazine/co/2013/08/mco2013080090/13rRUwcAqvs", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08019872", "title": "DeepEyes: Progressive Visual Analytics for Designing Deep Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2018/01/08019872/13rRUxlgxTs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06876049", "title": "Progressive Visual Analytics: User-Driven Visual Exploration of In-Progress Analytics", "doi": null, "abstractUrl": "/journal/tg/2014/12/06876049/13rRUyogGAd", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/02/08462793", "title": "PANENE: A Progressive Algorithm for Indexing and Querying Approximate <italic>k</italic>-Nearest Neighbors", "doi": null, "abstractUrl": "/journal/tg/2020/02/08462793/13w3lozFWqB", "parentPublication": { "id": "trans/tg", 
"title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08467535", "title": "Visual Analytics for Topic Model Optimization based on User-Steerable Speculative Execution", "doi": null, "abstractUrl": "/journal/tg/2019/01/08467535/17D45XeKgtW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/07/08943144", "title": "ProReveal: Progressive Visual Analytics With Safeguards", "doi": null, "abstractUrl": "/journal/tg/2021/07/08943144/1g3bi26D34k", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vizsec/2019/3876/0/09161633", "title": "NetCapVis: Web-based Progressive Visual Analytics for Network Packet Captures", "doi": null, "abstractUrl": "/proceedings-article/vizsec/2019/09161633/1m6hHX7VF7y", "parentPublication": { "id": "proceedings/vizsec/2019/3876/0", "title": "2019 IEEE Symposium on Visualization for Cyber Security (VizSec)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07448467", "articleId": "13rRUygBw7e", "__typename": "AdjacentArticleType" }, "next": { "fno": "07452672", "articleId": "13rRUx0xPZB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRX4", "name": "ttg201707-07473883s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07473883s1.zip", "extension": "zip", "size": "46.7 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0xPZB", "doi": "10.1109/TVCG.2016.2554114", "abstract": "Light scattering in participating media is a natural phenomenon that is increasingly featured in movies and games, as it is visually pleasing and lends realism to a scene. In art, it may further be used to express a certain mood or emphasize objects. Here, artists often rely on stylization when creating scattering effects, not only because of the complexity of physically correct scattering, but also to increase expressiveness. Little research, however, focuses on artistically influencing the simulation of the scattering process in a virtual 3D scene. We propose novel stylization techniques, enabling artists to change the appearance of single scattering effects such as light shafts. Users can add, remove, or enhance light shafts using occluder manipulation. The colors of the light shafts can be stylized and animated using easily modifiable transfer functions. Alternatively, our system can optimize a light map given a simple user input for a number of desired views in the 3D world. Finally, we enable artists to control the heterogeneity of the underlying medium. Our stylized scattering solution is easy to use and compatible with standard rendering pipelines. It works for animated scenes and can be executed in real time to provide the artist with quick feedback.", "abstracts": [ { "abstractType": "Regular", "content": "Light scattering in participating media is a natural phenomenon that is increasingly featured in movies and games, as it is visually pleasing and lends realism to a scene. In art, it may further be used to express a certain mood or emphasize objects. 
Here, artists often rely on stylization when creating scattering effects, not only because of the complexity of physically correct scattering, but also to increase expressiveness. Little research, however, focuses on artistically influencing the simulation of the scattering process in a virtual 3D scene. We propose novel stylization techniques, enabling artists to change the appearance of single scattering effects such as light shafts. Users can add, remove, or enhance light shafts using occluder manipulation. The colors of the light shafts can be stylized and animated using easily modifiable transfer functions. Alternatively, our system can optimize a light map given a simple user input for a number of desired views in the 3D world. Finally, we enable artists to control the heterogeneity of the underlying medium. Our stylized scattering solution is easy to use and compatible with standard rendering pipelines. It works for animated scenes and can be executed in real time to provide the artist with quick feedback.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Light scattering in participating media is a natural phenomenon that is increasingly featured in movies and games, as it is visually pleasing and lends realism to a scene. In art, it may further be used to express a certain mood or emphasize objects. Here, artists often rely on stylization when creating scattering effects, not only because of the complexity of physically correct scattering, but also to increase expressiveness. Little research, however, focuses on artistically influencing the simulation of the scattering process in a virtual 3D scene. We propose novel stylization techniques, enabling artists to change the appearance of single scattering effects such as light shafts. Users can add, remove, or enhance light shafts using occluder manipulation. The colors of the light shafts can be stylized and animated using easily modifiable transfer functions. 
Alternatively, our system can optimize a light map given a simple user input for a number of desired views in the 3D world. Finally, we enable artists to control the heterogeneity of the underlying medium. Our stylized scattering solution is easy to use and compatible with standard rendering pipelines. It works for animated scenes and can be executed in real time to provide the artist with quick feedback.", "title": "Expressive Single Scattering for Light Shaft Stylization", "normalizedTitle": "Expressive Single Scattering for Light Shaft Stylization", "fno": "07452672", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Scattering", "Shafts", "Image Color Analysis", "Media", "Art", "Transfer Functions", "Real Time Systems", "Interactive Stylization", "Artist Control", "Single Scattering" ], "authors": [ { "givenName": "Timothy R.", "surname": "Kol", "fullName": "Timothy R. Kol", "affiliation": "Computer Graphics & Visualization group at the Department of Intelligent Systems, Delft University of Technology, Delft, Zuid-Holland, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Oliver", "surname": "Klehm", "fullName": "Oliver Klehm", "affiliation": "Department of Computer Graphics, MPI Informatik, Saarbrücken, Saarland, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Hans-Peter", "surname": "Seidel", "fullName": "Hans-Peter Seidel", "affiliation": "Department of Computer Graphics, MPI Informatik, Saarbrücken, Saarland, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Elmar", "surname": "Eisemann", "fullName": "Elmar Eisemann", "affiliation": "Computer Graphics & Visualization group at the Department of Intelligent Systems, Delft University of Technology, Delft, Zuid-Holland, The Netherlands", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1753-1766", 
"year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2017/0733/0/0733b287", "title": "Generating 5D Light Fields in Scattering Media for Representing 3D Images", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b287/12OmNAndigF", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209e382", "title": "Light Transport Refocusing for Unknown Scattering Medium", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209e382/12OmNqzu6Nb", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2010/6984/0/05540216", "title": "Analysis of light transport in scattering media", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05540216/12OmNxWLTlm", "parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032c420", "title": "Depth and Image Restoration from Light Field in a Scattering Medium", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032c420/12OmNxjjEm8", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2000/0868/0/08680031", "title": "Interactive Rendering Method for Displaying Shafts of Light", "doi": null, 
"abstractUrl": "/proceedings-article/pg/2000/08680031/12OmNzBOhSI", "parentPublication": { "id": "proceedings/pg/2000/0868/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cerma/2010/4204/0/4204a721", "title": "FPGA Implementation of a 16-Channel Lock-In Laser Light Scattering System", "doi": null, "abstractUrl": "/proceedings-article/cerma/2010/4204a721/12OmNzhna82", "parentPublication": { "id": "proceedings/cerma/2010/4204/0", "title": "Electronics, Robotics and Automotive Mechanics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2005/2334/1/23340420", "title": "Structured Light in Scattering Media", "doi": null, "abstractUrl": "/proceedings-article/iccv/2005/23340420/12OmNzvz6Oz", "parentPublication": { "id": "proceedings/iccv/2005/2334/2", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/02/v0342", "title": "Light Scattering from Filaments", "doi": null, "abstractUrl": "/journal/tg/2007/02/v0342/13rRUwI5TXt", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122936", "title": "Ambient Volume Scattering", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122936/13rRUwcAqqh", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08600345", "title": "Precomputed Multiple Scattering for Rapid Light Simulation in Participating Media", "doi": null, "abstractUrl": "/journal/tg/2020/07/08600345/17D45Xh13tH", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07473883", "articleId": "13rRUxly8T1", "__typename": "AdjacentArticleType" }, "next": { "fno": "07470264", "articleId": "13rRUwI5TR3", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgQI", "name": "ttg201707-07452672s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07452672s1.zip", "extension": "zip", "size": "153 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwI5TR3", "doi": "10.1109/TVCG.2016.2569080", "abstract": "We present a novel method to optimize the attenuation of light for the single scattering model in direct volume rendering. A common problem of single scattering is the high dynamic range between lit and shadowed regions due to the exponential attenuation of light along a ray. Moreover, light is often attenuated too strong between a sample point and the camera, hampering the visibility of important features. Our algorithm employs an importance function to selectively illuminate important structures and make them visible from the camera. With the importance function, more light can be transmitted to the features of interest, while contextual structures cast shadows which provide visual cues for perception of depth. At the same time, more scattered light is transmitted from the sample point to the camera to improve the primary visibility of important features. We formulate a minimization problem that automatically determines the extinction along a view or shadow ray to obtain a good balance between sufficient transmittance and attenuation. In contrast to previous approaches, we do not require a computationally expensive solution of a global optimization, but instead provide a closed-form solution for each sampled extinction value along a view or shadow ray and thus achieve interactive performance.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel method to optimize the attenuation of light for the single scattering model in direct volume rendering. 
A common problem of single scattering is the high dynamic range between lit and shadowed regions due to the exponential attenuation of light along a ray. Moreover, light is often attenuated too strong between a sample point and the camera, hampering the visibility of important features. Our algorithm employs an importance function to selectively illuminate important structures and make them visible from the camera. With the importance function, more light can be transmitted to the features of interest, while contextual structures cast shadows which provide visual cues for perception of depth. At the same time, more scattered light is transmitted from the sample point to the camera to improve the primary visibility of important features. We formulate a minimization problem that automatically determines the extinction along a view or shadow ray to obtain a good balance between sufficient transmittance and attenuation. In contrast to previous approaches, we do not require a computationally expensive solution of a global optimization, but instead provide a closed-form solution for each sampled extinction value along a view or shadow ray and thus achieve interactive performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel method to optimize the attenuation of light for the single scattering model in direct volume rendering. A common problem of single scattering is the high dynamic range between lit and shadowed regions due to the exponential attenuation of light along a ray. Moreover, light is often attenuated too strong between a sample point and the camera, hampering the visibility of important features. Our algorithm employs an importance function to selectively illuminate important structures and make them visible from the camera. With the importance function, more light can be transmitted to the features of interest, while contextual structures cast shadows which provide visual cues for perception of depth. 
At the same time, more scattered light is transmitted from the sample point to the camera to improve the primary visibility of important features. We formulate a minimization problem that automatically determines the extinction along a view or shadow ray to obtain a good balance between sufficient transmittance and attenuation. In contrast to previous approaches, we do not require a computationally expensive solution of a global optimization, but instead provide a closed-form solution for each sampled extinction value along a view or shadow ray and thus achieve interactive performance.", "title": "Extinction-Optimized Volume Illumination", "normalizedTitle": "Extinction-Optimized Volume Illumination", "fno": "07470264", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Lighting", "Scattering", "Cost Function", "Rendering Computer Graphics", "Light Sources", "Visualization", "Direct Volume Rendering", "Volume Illumination", "Extinction Optimization" ], "authors": [ { "givenName": "Marco", "surname": "Ament", "fullName": "Marco Ament", "affiliation": "Karlsruhe Institute of Technology, Karlsruhe, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Tobias", "surname": "Zirr", "fullName": "Tobias Zirr", "affiliation": "Karlsruhe Institute of Technology, Karlsruhe, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Carsten", "surname": "Dachsbacher", "fullName": "Carsten Dachsbacher", "affiliation": "Karlsruhe Institute of Technology, Karlsruhe, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1767-1781", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/robot/1992/2720/0/00220125", "title": "Photometric stereo using point light sources", "doi": null, 
"abstractUrl": "/proceedings-article/robot/1992/00220125/12OmNAkEU4j", "parentPublication": { "id": "proceedings/robot/1992/2720/0", "title": "Proceedings 1992 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156382", "title": "Efficient volume illumination with multiple light sources through selective light updates", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156382/12OmNvDZF6A", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2012/4896/0/4896a396", "title": "Fast Multiple Scattering in Participating Media with Beamlet Decomposition", "doi": null, "abstractUrl": "/proceedings-article/cis/2012/4896a396/12OmNwekjJa", "parentPublication": { "id": "proceedings/cis/2012/4896/0", "title": "2012 Eighth International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2013/6463/0/06528300", "title": "Descattering of transmissive observation using Parallel High-Frequency Illumination", "doi": null, "abstractUrl": "/proceedings-article/iccp/2013/06528300/12OmNzmclka", "parentPublication": { "id": "proceedings/iccp/2013/6463/0", "title": "2013 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d595", "title": "Depth Selective Camera: A Direct, On-Chip, Programmable Technique for Depth Selectivity in Photography", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d595/12OmNzt0INA", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International 
Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2010/06/mcg2010060029", "title": "Advanced Volume Illumination with Unconstrained Light Source Positioning", "doi": null, "abstractUrl": "/magazine/cg/2010/06/mcg2010060029/13rRUNvPLcm", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122936", "title": "Ambient Volume Scattering", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122936/13rRUwcAqqh", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122364", "title": "Historygrams: Enabling Interactive Global Illumination in Direct Volume Rendering using Photon Mapping", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122364/13rRUyYjK5h", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017622", "title": "Interactive Dynamic Volume Illumination with Refraction and Caustics", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017622/13rRUyfKIHU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2020/8138/0/813800a546", "title": "Stage Lighting Simulation Based on Epipolar Sampling", "doi": null, "abstractUrl": "/proceedings-article/iccst/2020/813800a546/1p1goMqefzq", "parentPublication": { "id": "proceedings/iccst/2020/8138/0", "title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07452672", "articleId": "13rRUx0xPZB", "__typename": "AdjacentArticleType" }, "next": { "fno": "07471499", "articleId": "13rRUNvgz9V", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgGj", "name": "ttg201707-07470264s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07470264s1.zip", "extension": "zip", "size": "34.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvgz9V", "doi": "10.1109/TVCG.2016.2570215", "abstract": "Isosurfaces are fundamental geometrical objects for the analysis and visualization of volumetric scalar fields. Recent work has generalized them to bivariate volumetric fields with fiber surfaces, the pre-image of polygons in range space. However, the existing algorithm for their computation is approximate, and is limited to closed polygons. Moreover, its runtime performance does not allow instantaneous updates of the fiber surfaces upon user edits of the polygons. Overall, these limitations prevent a reliable and interactive exploration of the space of fiber surfaces. This paper introduces the first algorithm for the exact computation of fiber surfaces in tetrahedral meshes. It assumes no restriction on the topology of the input polygon, handles degenerate cases and better captures sharp features induced by polygon bends. The algorithm also allows visualization of individual fibers on the output surface, better illustrating their relationship with data features in range space. To enable truly interactive exploration sessions, we further improve the runtime performance of this algorithm. In particular, we show that it is trivially parallelizable and that it scales nearly linearly with the number of cores. Further, we study acceleration data-structures both in geometrical domain and range space and we show how to generalize interval trees used in isosurface extraction to fiber surface extraction. Experiments demonstrate the superiority of our algorithm over previous work, both in terms of accuracy and running time, with up to two orders of magnitude speedups. 
This improvement enables interactive edits of range polygons with instantaneous updates of the fiber surface for exploration purpose. A VTK-based reference implementation is provided as additional material to reproduce our results.", "abstracts": [ { "abstractType": "Regular", "content": "Isosurfaces are fundamental geometrical objects for the analysis and visualization of volumetric scalar fields. Recent work has generalized them to bivariate volumetric fields with fiber surfaces, the pre-image of polygons in range space. However, the existing algorithm for their computation is approximate, and is limited to closed polygons. Moreover, its runtime performance does not allow instantaneous updates of the fiber surfaces upon user edits of the polygons. Overall, these limitations prevent a reliable and interactive exploration of the space of fiber surfaces. This paper introduces the first algorithm for the exact computation of fiber surfaces in tetrahedral meshes. It assumes no restriction on the topology of the input polygon, handles degenerate cases and better captures sharp features induced by polygon bends. The algorithm also allows visualization of individual fibers on the output surface, better illustrating their relationship with data features in range space. To enable truly interactive exploration sessions, we further improve the runtime performance of this algorithm. In particular, we show that it is trivially parallelizable and that it scales nearly linearly with the number of cores. Further, we study acceleration data-structures both in geometrical domain and range space and we show how to generalize interval trees used in isosurface extraction to fiber surface extraction. Experiments demonstrate the superiority of our algorithm over previous work, both in terms of accuracy and running time, with up to two orders of magnitude speedups. 
This improvement enables interactive edits of range polygons with instantaneous updates of the fiber surface for exploration purpose. A VTK-based reference implementation is provided as additional material to reproduce our results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Isosurfaces are fundamental geometrical objects for the analysis and visualization of volumetric scalar fields. Recent work has generalized them to bivariate volumetric fields with fiber surfaces, the pre-image of polygons in range space. However, the existing algorithm for their computation is approximate, and is limited to closed polygons. Moreover, its runtime performance does not allow instantaneous updates of the fiber surfaces upon user edits of the polygons. Overall, these limitations prevent a reliable and interactive exploration of the space of fiber surfaces. This paper introduces the first algorithm for the exact computation of fiber surfaces in tetrahedral meshes. It assumes no restriction on the topology of the input polygon, handles degenerate cases and better captures sharp features induced by polygon bends. The algorithm also allows visualization of individual fibers on the output surface, better illustrating their relationship with data features in range space. To enable truly interactive exploration sessions, we further improve the runtime performance of this algorithm. In particular, we show that it is trivially parallelizable and that it scales nearly linearly with the number of cores. Further, we study acceleration data-structures both in geometrical domain and range space and we show how to generalize interval trees used in isosurface extraction to fiber surface extraction. Experiments demonstrate the superiority of our algorithm over previous work, both in terms of accuracy and running time, with up to two orders of magnitude speedups. 
This improvement enables interactive edits of range polygons with instantaneous updates of the fiber surface for exploration purpose. A VTK-based reference implementation is provided as additional material to reproduce our results.", "title": "Fast and Exact Fiber Surfaces for Tetrahedral Meshes", "normalizedTitle": "Fast and Exact Fiber Surfaces for Tetrahedral Meshes", "fno": "07471499", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Isosurfaces", "Acceleration", "Topology", "Feature Extraction", "Chemicals", "Robustness", "Bivariate Data", "Data Segmentation", "Data Analysis", "Isosurfaces", "Continuous Scatterplot" ], "authors": [ { "givenName": "Pavol", "surname": "Klacansky", "fullName": "Pavol Klacansky", "affiliation": "SCI Institute, University of Utah, Salt Lake City, UT", "__typename": "ArticleAuthorType" }, { "givenName": "Julien", "surname": "Tierny", "fullName": "Julien Tierny", "affiliation": "Sorbonne Universites, UPMC Univ Paris 06, CNRS, France", "__typename": "ArticleAuthorType" }, { "givenName": "Hamish", "surname": "Carr", "fullName": "Hamish Carr", "affiliation": "University of Leeds, Leeds, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Zhao", "surname": "Geng", "fullName": "Zhao Geng", "affiliation": "University of Leeds, Leeds, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1782-1795", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2005/2766/0/01532824", "title": "Reconstructing manifold and non-manifold surfaces from point clouds", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532824/12OmNrkT7FS", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization 
Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2006/2686/0/26860213", "title": "Adapted Dynamic Meshes for Deformable Surfaces", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2006/26860213/12OmNzBOi0T", "parentPublication": { "id": "proceedings/sibgrapi/2006/2686/0", "title": "2006 19th Brazilian Symposium on Computer Graphics and Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539653", "title": "Direct Multifield Volume Ray Casting of Fiber Surfaces", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539653/13rRUB6Sq0C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/01/v0099", "title": "Polynomial Surfaces Interpolating Arbitrary Triangulations", "doi": null, "abstractUrl": "/journal/tg/2003/01/v0099/13rRUwInvAR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539583", "title": "Jacobi Fiber Surfaces for Bivariate Reeb Space Computation", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539583/13rRUx0xPif", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192700", "title": "Interactive Visualization for Singular Fibers of Functions f : R3 → R2", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192700/13rRUxly9dW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "trans/tp/2015/12/07053934", "title": "Maurer-Cartan Forms for Fields on Surfaces: Application to Heart Fiber Geometry", "doi": null, "abstractUrl": "/journal/tp/2015/12/07053934/13rRUygT7u1", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08447439", "title": "Tensor Field Visualization using Fiber Surfaces of Invariant Space", "doi": null, "abstractUrl": "/journal/tg/2019/01/08447439/17D45W1Oa1M", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09767783", "title": "b/Surf: Interactive Bézier Splines on Surface Meshes", "doi": null, "abstractUrl": "/journal/tg/5555/01/09767783/1D4MIotOemQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/topoinvis/2022/9354/0/935400a049", "title": "Jacobi Set Driven Search for Flexible Fiber Surface Extraction", "doi": null, "abstractUrl": "/proceedings-article/topoinvis/2022/935400a049/1J2XKqOgZI4", "parentPublication": { "id": "proceedings/topoinvis/2022/9354/0", "title": "2022 Topological Data Analysis and Visualization (TopoInVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07470264", "articleId": "13rRUwI5TR3", "__typename": "AdjacentArticleType" }, "next": { "fno": "07482721", "articleId": "13rRUxcbnCv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgRG", "name": "ttg201707-07471499s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07471499s1.zip", "extension": 
"zip", "size": "5.65 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxcbnCv", "doi": "10.1109/TVCG.2016.2574705", "abstract": "We introduce an interactive user-driven method to reconstruct high-relief 3D geometry from a single photo. Particularly, we consider two novel but challenging reconstruction issues: i) common non-rigid objects whose shapes are organic rather than polyhedral/symmetric, and ii) double-sided structures, where front and back sides of some curvy object parts are revealed simultaneously on image. To address these issues, we develop a three-stage computational pipeline. First, we construct a 2.5D model from the input image by user-driven segmentation, automatic layering, and region completion, handling three common types of occlusion. Second, users can interactively mark-up slope and curvature cues on the image to guide our constrained optimization model to inflate and lift up the image layers. We provide real-time preview of the inflated geometry to allow interactive editing. Third, we stitch and optimize the inflated layers to produce a high-relief 3D model. Compared to previous work, we can generate high-relief geometry with large viewing angles, handle complex organic objects with multiple occluded regions and varying shape profiles, and reconstruct objects with double-sided structures. Lastly, we demonstrate the applicability of our method on a wide variety of input images with human, animals, flowers, etc.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce an interactive user-driven method to reconstruct high-relief 3D geometry from a single photo. 
Particularly, we consider two novel but challenging reconstruction issues: i) common non-rigid objects whose shapes are organic rather than polyhedral/symmetric, and ii) double-sided structures, where front and back sides of some curvy object parts are revealed simultaneously on image. To address these issues, we develop a three-stage computational pipeline. First, we construct a 2.5D model from the input image by user-driven segmentation, automatic layering, and region completion, handling three common types of occlusion. Second, users can interactively mark-up slope and curvature cues on the image to guide our constrained optimization model to inflate and lift up the image layers. We provide real-time preview of the inflated geometry to allow interactive editing. Third, we stitch and optimize the inflated layers to produce a high-relief 3D model. Compared to previous work, we can generate high-relief geometry with large viewing angles, handle complex organic objects with multiple occluded regions and varying shape profiles, and reconstruct objects with double-sided structures. Lastly, we demonstrate the applicability of our method on a wide variety of input images with human, animals, flowers, etc.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce an interactive user-driven method to reconstruct high-relief 3D geometry from a single photo. Particularly, we consider two novel but challenging reconstruction issues: i) common non-rigid objects whose shapes are organic rather than polyhedral/symmetric, and ii) double-sided structures, where front and back sides of some curvy object parts are revealed simultaneously on image. To address these issues, we develop a three-stage computational pipeline. First, we construct a 2.5D model from the input image by user-driven segmentation, automatic layering, and region completion, handling three common types of occlusion. 
Second, users can interactively mark-up slope and curvature cues on the image to guide our constrained optimization model to inflate and lift up the image layers. We provide real-time preview of the inflated geometry to allow interactive editing. Third, we stitch and optimize the inflated layers to produce a high-relief 3D model. Compared to previous work, we can generate high-relief geometry with large viewing angles, handle complex organic objects with multiple occluded regions and varying shape profiles, and reconstruct objects with double-sided structures. Lastly, we demonstrate the applicability of our method on a wide variety of input images with human, animals, flowers, etc.", "title": "Interactive High-Relief Reconstruction for Organic and Double-Sided Objects from a Photo", "normalizedTitle": "Interactive High-Relief Reconstruction for Organic and Double-Sided Objects from a Photo", "fno": "07482721", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Three Dimensional Displays", "Image Reconstruction", "Geometry", "Shape", "Solid Modeling", "Image Segmentation", "Surface Reconstruction", "Reconstruction", "High Relief", "Lenticular Posters", "Single Image", "Folded", "Double Sided", "Object Modeling", "Depth Cues", "Completion", "Inflation" ], "authors": [ { "givenName": "Chih-Kuo", "surname": "Yeh", "fullName": "Chih-Kuo Yeh", "affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Tainan City, Taiwan, R.O.C", "__typename": "ArticleAuthorType" }, { "givenName": "Shi-Yang", "surname": "Huang", "fullName": "Shi-Yang Huang", "affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Tainan City, Taiwan, R.O.C", "__typename": "ArticleAuthorType" }, { "givenName": "Pradeep Kumar", "surname": "Jayaraman", "fullName": "Pradeep Kumar Jayaraman", "affiliation": "School of Computer Engineering, Nanyang Technological University, Singapore, 639798, Singapore", 
"__typename": "ArticleAuthorType" }, { "givenName": "Chi-Wing", "surname": "Fu", "fullName": "Chi-Wing Fu", "affiliation": "Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Tong-Yee", "surname": "Lee", "fullName": "Tong-Yee Lee", "affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Tainan City, Taiwan, R.O.C", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1796-1808", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pg/1997/8028/0/80280091", "title": "Modification of n-sided patches based on variation of blending functions", "doi": null, "abstractUrl": "/proceedings-article/pg/1997/80280091/12OmNAsTgQO", "parentPublication": { "id": "proceedings/pg/1997/8028/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2014/7000/1/7000a440", "title": "Merge2-3D: Combining Multiple Normal Maps with 3D Surfaces", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a440/12OmNx8Ouv7", "parentPublication": { "id": "3dv/2014/7000/1", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2016/2312/0/2312a022", "title": "A Point Cloud Model Based Image Relief Effect Design", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2016/2312a022/12OmNyNQSQ4", "parentPublication": { "id": "proceedings/icmtma/2016/2312/0", "title": "2016 Eighth International Conference on Measuring Technology and 
Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2015/7962/0/7962a001", "title": "Meta-Relief Texture Mapping with Dynamic Texture-Space Ambient Occlusion", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2015/7962a001/12OmNyp9MiX", "parentPublication": { "id": "proceedings/sibgrapi/2015/7962/0", "title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/02/ttg2013020225", "title": "Double-Sided 2.5D Graphics", "doi": null, "abstractUrl": "/journal/tg/2013/02/ttg2013020225/13rRUEgs2M0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2003/03/mcg2003030038", "title": "Generating Organic Textures with Controlled Anisotropy and Directionality", "doi": null, "abstractUrl": "/magazine/cg/2003/03/mcg2003030038/13rRUxCitLE", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/08/08611145", "title": "Portrait Relief Modeling from a Single Image", "doi": null, "abstractUrl": "/journal/tg/2020/08/08611145/17D45XDIXSX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/04/08322258", "title": "Bas-Relief Modeling from Normal Layers", "doi": null, "abstractUrl": "/journal/tg/2019/04/08322258/17YCN5E6cAE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/5555/01/09852330", "title": "Neural Modeling of Portrait Bas-relief from a Single Photograph", "doi": null, "abstractUrl": "/journal/tg/5555/01/09852330/1FFHdt1RWHC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09468903", "title": "Human Bas-Relief Generation From a Single Photograph", "doi": null, "abstractUrl": "/journal/tg/2022/12/09468903/1uR9KNPeety", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07471499", "articleId": "13rRUNvgz9V", "__typename": "AdjacentArticleType" }, "next": { "fno": "07451283", "articleId": "13rRUxC0Sw0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRXt", "name": "ttg201707-07482721s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07482721s1.zip", "extension": "zip", "size": "19.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxC0Sw0", "doi": "10.1109/TVCG.2016.2553102", "abstract": "We present a novel approach for constructing a complete 3D model for an object from a single RGBD image. Given an image of an object segmented from the background, a collection of 3D models of the same category are non-rigidly aligned with the input depth, to compute a rough initial result. A volumetric-patch-based optimization algorithm is then performed to refine the initial result to generate a 3D model that not only is globally consistent with the overall shape expected from the input image but also possesses geometric details similar to those in the input image. The optimization with a set of high-level constraints, such as visibility, surface confidence and symmetry, can achieve more robust and accurate completion over state-of-the art techniques. We demonstrate the efficiency and robustness of our approach with multiple categories of objects with various geometries and details, including busts, chairs, bikes, toys, vases and tables.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel approach for constructing a complete 3D model for an object from a single RGBD image. Given an image of an object segmented from the background, a collection of 3D models of the same category are non-rigidly aligned with the input depth, to compute a rough initial result. A volumetric-patch-based optimization algorithm is then performed to refine the initial result to generate a 3D model that not only is globally consistent with the overall shape expected from the input image but also possesses geometric details similar to those in the input image. 
The optimization with a set of high-level constraints, such as visibility, surface confidence and symmetry, can achieve more robust and accurate completion over state-of-the art techniques. We demonstrate the efficiency and robustness of our approach with multiple categories of objects with various geometries and details, including busts, chairs, bikes, toys, vases and tables.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel approach for constructing a complete 3D model for an object from a single RGBD image. Given an image of an object segmented from the background, a collection of 3D models of the same category are non-rigidly aligned with the input depth, to compute a rough initial result. A volumetric-patch-based optimization algorithm is then performed to refine the initial result to generate a 3D model that not only is globally consistent with the overall shape expected from the input image but also possesses geometric details similar to those in the input image. The optimization with a set of high-level constraints, such as visibility, surface confidence and symmetry, can achieve more robust and accurate completion over state-of-the art techniques. 
We demonstrate the efficiency and robustness of our approach with multiple categories of objects with various geometries and details, including busts, chairs, bikes, toys, vases and tables.", "title": "Shape Completion from a Single RGBD Image", "normalizedTitle": "Shape Completion from a Single RGBD Image", "fno": "07451283", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Shape", "Three Dimensional Displays", "Solid Modeling", "Geometry", "Optimization", "Computational Modeling", "Deformable Models", "RGBD Camera", "Shape Completion", "Single RGBD Image" ], "authors": [ { "givenName": "Dongping", "surname": "Li", "fullName": "Dongping Li", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Tianjia", "surname": "Shao", "fullName": "Tianjia Shao", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hongzhi", "surname": "Wu", "fullName": "Hongzhi Wu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Kun", "surname": "Zhou", "fullName": "Kun Zhou", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1809-1822", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457g545", "title": "Shape Completion Using 3D-Encoder-Predictor CNNs and Shape Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457g545/12OmNAsBFOK", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern 
Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2018/4886/0/488601a858", "title": "DeformNet: Free-Form Deformation Network for 3D Shape Reconstruction from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601a858/12OmNyKJiqm", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457a190", "title": "Semantic Scene Completion from a Single Depth Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457a190/12OmNzn38Ky", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200m2569", "title": "Patch2CAD: Patchwise Embedding Learning for In-the-Wild Shape Retrieval from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200m2569/1BmFAk4MRe8", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a775", "title": "SIRA: Relightable Avatars from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a775/1L6LvQR1bs4", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2022/9633/0/963300a024", "title": "Dense 3D Face Reconstruction from a Single RGB Image", "doi": null, "abstractUrl": 
"/proceedings-article/cse/2022/963300a024/1Lz24fdFD32", "parentPublication": { "id": "proceedings/cse/2022/9633/0", "title": "2022 IEEE 25th International Conference on Computational Science and Engineering (CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a643", "title": "360-Degree Textures of People in Clothing from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a643/1ezRDPNhkpW", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09462521", "title": "View-Aware Geometry-Structure Joint Learning for Single-View 3D Shape Reconstruction", "doi": null, "abstractUrl": "/journal/tp/2022/10/09462521/1uDSvbmzJQc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2020/9234/0/923400a184", "title": "Deep 3D Shape Reconstruction from Single-View Sketch Image", "doi": null, "abstractUrl": "/proceedings-article/icdh/2020/923400a184/1uGY2GTiIda", "parentPublication": { "id": "proceedings/icdh/2020/9234/0", "title": "2020 8th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800a815", "title": "SIDER: Single-Image Neural Optimization for Facial Geometric Detail Recovery", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800a815/1zWE94Zh1Ru", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07482721", 
"articleId": "13rRUxcbnCv", "__typename": "AdjacentArticleType" }, "next": { "fno": "07439844", "articleId": "13rRUy2YLYD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesQC", "name": "ttg201707-07451283s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07451283s1.zip", "extension": "zip", "size": "33.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUy2YLYD", "doi": "10.1109/TVCG.2016.2545670", "abstract": "We present a novel dense crowd simulation method. In real crowds of high density, people manoeuvring the crowd need to twist their torso to pass between others. Our proposed method does not use the traditional disc-shaped agent, but instead employs capsule-shaped agents, which enables us to plan such torso orientations. Contrary to other crowd simulation systems, which often focus on the movement of the entire crowd, our method distinguishes between active agents that try to manoeuvre through the crowd, and passive agents that have no incentive to move. We introduce the concept of a focus point to influence crowd agent orientation. Recorded data from real human crowds are used for validation, which shows that our proposed model produces equivalent paths for 85 percent of the validation set. Furthermore, we present a character animation technique that uses the results from our crowd model to generate torso-twisting and side-stepping characters.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel dense crowd simulation method. In real crowds of high density, people manoeuvring the crowd need to twist their torso to pass between others. Our proposed method does not use the traditional disc-shaped agent, but instead employs capsule-shaped agents, which enables us to plan such torso orientations. Contrary to other crowd simulation systems, which often focus on the movement of the entire crowd, our method distinguishes between active agents that try to manoeuvre through the crowd, and passive agents that have no incentive to move. 
We introduce the concept of a focus point to influence crowd agent orientation. Recorded data from real human crowds are used for validation, which shows that our proposed model produces equivalent paths for 85 percent of the validation set. Furthermore, we present a character animation technique that uses the results from our crowd model to generate torso-twisting and side-stepping characters.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel dense crowd simulation method. In real crowds of high density, people manoeuvring the crowd need to twist their torso to pass between others. Our proposed method does not use the traditional disc-shaped agent, but instead employs capsule-shaped agents, which enables us to plan such torso orientations. Contrary to other crowd simulation systems, which often focus on the movement of the entire crowd, our method distinguishes between active agents that try to manoeuvre through the crowd, and passive agents that have no incentive to move. We introduce the concept of a focus point to influence crowd agent orientation. Recorded data from real human crowds are used for validation, which shows that our proposed model produces equivalent paths for 85 percent of the validation set. 
Furthermore, we present a character animation technique that uses the results from our crowd model to generate torso-twisting and side-stepping characters.", "title": "Torso Crowds", "normalizedTitle": "Torso Crowds", "fno": "07439844", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Animation", "Multi Agent Systems", "Torso Crowds", "Dense Crowd Simulation Method", "Disc Shaped Agent", "Capsule Shaped Agents", "Torso Orientation", "Focus Point Concept", "Torso Twisting Characters", "Side Stepping Characters", "Character Animation Technique", "Torso", "Computational Modeling", "Planning", "Animation", "Shape", "Data Models", "Legged Locomotion", "Crowd Simulation", "Crowd Animation", "Dense Crowds", "Agent Representation", "Holonomic Motion" ], "authors": [ { "givenName": "Sybren A.", "surname": "Stüvel", "fullName": "Sybren A. Stüvel", "affiliation": "Virtual Human Technology lab, Utrecht University, Utrecht, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Nadia", "surname": "Magnenat-Thalmann", "fullName": "Nadia Magnenat-Thalmann", "affiliation": "Institute for Media Innovation, Nanyang Technological University, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Thalmann", "fullName": "Daniel Thalmann", "affiliation": "Institute for Media Innovation, Nanyang Technological University, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "A. Frank van der", "surname": "Stappen", "fullName": "A. 
Frank van der Stappen", "affiliation": "Virtual Human Technology lab, Utrecht University, Utrecht, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Arjan", "surname": "Egges", "fullName": "Arjan Egges", "affiliation": "Virtual Human Technology lab, Utrecht University, Utrecht, The Netherlands", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1823-1837", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ca/2001/7237/0/00982381", "title": "Simulating virtual human crowds with a leader-follower model", "doi": null, "abstractUrl": "/proceedings-article/ca/2001/00982381/12OmNBr4ewM", "parentPublication": { "id": "proceedings/ca/2001/7237/0", "title": "Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2013/5051/0/5051a017", "title": "Simulating Gait and Structural Effects of Aging for Improved Diversity in Virtual Crowds", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2013/5051a017/12OmNrnJ6VZ", "parentPublication": { "id": "proceedings/cgiv/2013/5051/0", "title": "2013 10th International Conference Computer Graphics, Imaging and Visualization (CGIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2006/0226/0/02260014", "title": "Harness Design and Coupling Stiffness for Two-Axis Torso Haptics", "doi": null, "abstractUrl": "/proceedings-article/haptics/2006/02260014/12OmNwDACDX", "parentPublication": { "id": "proceedings/haptics/2006/0226/0", "title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptic/2006/0226/0/01627067", "title": "Harness Design and Coupling Stiffness for Two-Axis Torso Haptics", "doi": null, "abstractUrl": "/proceedings-article/haptic/2006/01627067/12OmNyuyacp", "parentPublication": { "id": "proceedings/haptic/2006/0226/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2011/0063/0/06130351", "title": "Spatiotemporally localized new event detection in crowds", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130351/12OmNz6iOpj", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2001/02/v0152", "title": "Hierarchical Model for Real Time Simulation of Virtual Human Crowds", "doi": null, "abstractUrl": "/journal/tg/2001/02/v0152/13rRUwkxc5h", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2019/4540/0/08864512", "title": "Social-aware navigation in crowds with static and dynamic groups", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2019/08864512/1e5ZqjnX5UA", "parentPublication": { "id": "proceedings/vs-games/2019/4540/0", "title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2019/5604/0/560400a306", "title": "Implementing Position-Based Real-Time Simulation of Large Crowds", "doi": null, "abstractUrl": 
"/proceedings-article/aivr/2019/560400a306/1grOkE6FToY", "parentPublication": { "id": "proceedings/aivr/2019/5604/0", "title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089637", "title": "Eye-Gaze Activity in Crowds: Impact of Virtual Reality and Density", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089637/1jIx9WIWd5C", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089573", "title": "Effects of Interacting with a Crowd of Emotional Virtual Humans on Users&#x2019; Affective and Non-Verbal Behaviors", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089573/1jIxfPwklig", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07451283", "articleId": "13rRUxC0Sw0", "__typename": "AdjacentArticleType" }, "next": { "fno": "07460953", "articleId": "13rRUILc8ff", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUILc8ff", "doi": "10.1109/TVCG.2016.2559483", "abstract": "We present a new approach to rendering a geometrically-correct user-perspective view for a magic lens interface, based on leveraging the gradients in the real world scene. Our approach couples a recent gradient-domain image-based rendering method with a novel semi-dense stereo matching algorithm. Our stereo algorithm borrows ideas from PatchMatch, and adapts them to semi-dense stereo. This approach is implemented in a prototype device build from off-the-shelf hardware, with no active depth sensing. Despite the limited depth data, we achieve high-quality rendering for the user-perspective magic lens.", "abstracts": [ { "abstractType": "Regular", "content": "We present a new approach to rendering a geometrically-correct user-perspective view for a magic lens interface, based on leveraging the gradients in the real world scene. Our approach couples a recent gradient-domain image-based rendering method with a novel semi-dense stereo matching algorithm. Our stereo algorithm borrows ideas from PatchMatch, and adapts them to semi-dense stereo. This approach is implemented in a prototype device build from off-the-shelf hardware, with no active depth sensing. Despite the limited depth data, we achieve high-quality rendering for the user-perspective magic lens.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a new approach to rendering a geometrically-correct user-perspective view for a magic lens interface, based on leveraging the gradients in the real world scene. 
Our approach couples a recent gradient-domain image-based rendering method with a novel semi-dense stereo matching algorithm. Our stereo algorithm borrows ideas from PatchMatch, and adapts them to semi-dense stereo. This approach is implemented in a prototype device build from off-the-shelf hardware, with no active depth sensing. Despite the limited depth data, we achieve high-quality rendering for the user-perspective magic lens.", "title": "User-Perspective AR Magic Lens from Gradient-Based IBR and Semi-Dense Stereo", "normalizedTitle": "User-Perspective AR Magic Lens from Gradient-Based IBR and Semi-Dense Stereo", "fno": "07460953", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Lenses", "Rendering Computer Graphics", "Image Reconstruction", "Cameras", "Real Time Systems", "Sensors", "Augmented Reality", "Magic Lens", "User Perspective", "Image Based Rendering", "Gradient Domain", "Semi Dense Stereo" ], "authors": [ { "givenName": "Domagoj", "surname": "Baričević", "fullName": "Domagoj Baričević", "affiliation": "Department of Computer Science, University of California, Santa Barbara, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Tobias", "surname": "Höllerer", "fullName": "Tobias Höllerer", "affiliation": "Department of Computer Science, University of California, Santa Barbara, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Pradeep", "surname": "Sen", "fullName": "Pradeep Sen", "affiliation": "Department of Electrical and Computer Engineering, University of California, Santa Barbara, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Matthew", "surname": "Turk", "fullName": "Matthew Turk", "affiliation": "Department of Computer Science, University of California, Santa Barbara, CA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1838-1851", "year": "2017", 
"issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2008/1971/0/04480772", "title": "New Rendering Approach for Composable Volumetric Lenses", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480772/12OmNBAqZId", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444782", "title": "Single-pass 3D lens rendering and spatiotemporal \"Time Warp\" example", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444782/12OmNBO3JYm", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2007/1749/0/04538832", "title": "Evaluating Display Types for AR Selection and Annotation", "doi": null, "abstractUrl": "/proceedings-article/ismar/2007/04538832/12OmNrIaef4", "parentPublication": { "id": "proceedings/ismar/2007/1749/0", "title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2007/1749/0/04538825", "title": "A 3D Flexible and Tangible Magic Lens in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2007/04538825/12OmNwNwzHD", "parentPublication": { "id": "proceedings/ismar/2007/1749/0", "title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444818", "title": "An evaluation of physical affordances in augmented virtual environments: Dataset grounding and Magic Lens", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2010/05444818/12OmNwwd2PF", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504757", "title": "Combining eye tracking with optimizations for lens astigmatism in modern wide-angle HMDs", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504757/12OmNySG3Vp", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532818", "title": "The magic volume lens: an interactive focus+context technique for volume rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532818/12OmNyuyade", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402557", "title": "A hand-held AR magic lens with user-perspective rendering", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402557/12OmNz5s0SW", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1997/06/mcg1997060062", "title": "Enhanced Illustration Using Magic Lens Filters", "doi": null, "abstractUrl": "/magazine/cg/1997/06/mcg1997060062/13rRUIJuxxP", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2006/04/mcg2006040064", "title": "Magic Lenses for Augmented Virtual Environments", "doi": null, "abstractUrl": 
"/magazine/cg/2006/04/mcg2006040064/13rRUyZaxsW", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07439844", "articleId": "13rRUy2YLYD", "__typename": "AdjacentArticleType" }, "next": { "fno": "07452668", "articleId": "13rRUwbJD4Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYet1Q", "name": "ttg201707-07460953s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07460953s1.zip", "extension": "zip", "size": "9.95 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwbJD4Q", "doi": "10.1109/TVCG.2016.2554113", "abstract": "We present DrawFromDrawings, an interactive drawing system that provides users with visual feedback for assistance in 2D drawing using a database of sketch images. Following the traditional imitation and emulation training from art education, DrawFromDrawings enables users to retrieve and refer to a sketch image stored in a database and provides them with various novel strokes as suggestive or deformation feedback. Given regions of interest (ROIs) in the user and reference sketches, DrawFromDrawings detects as-long-as-possible (ALAP) stroke segments and the correspondences between user and reference sketches that are the key to computing seamless interpolations. The stroke-level interpolations are parametrized with the user strokes, the reference strokes, and new strokes created by warping the reference strokes based on the user and reference ROI shapes, and the user study indicated that the interpolation could produce various reasonable strokes varying in shapes and complexity. DrawFromDrawings allows users to either replace their strokes with interpolated strokes (deformation feedback) or overlays interpolated strokes onto their strokes (suggestive feedback). 
The other user studies on the feedback modes indicated that the suggestive feedback enabled drawers to develop and render their ideas using their own stroke style, whereas the deformation feedback enabled them to finish the sketch composition quickly.", "abstracts": [ { "abstractType": "Regular", "content": "We present DrawFromDrawings, an interactive drawing system that provides users with visual feedback for assistance in 2D drawing using a database of sketch images. Following the traditional imitation and emulation training from art education, DrawFromDrawings enables users to retrieve and refer to a sketch image stored in a database and provides them with various novel strokes as suggestive or deformation feedback. Given regions of interest (ROIs) in the user and reference sketches, DrawFromDrawings detects as-long-as-possible (ALAP) stroke segments and the correspondences between user and reference sketches that are the key to computing seamless interpolations. The stroke-level interpolations are parametrized with the user strokes, the reference strokes, and new strokes created by warping the reference strokes based on the user and reference ROI shapes, and the user study indicated that the interpolation could produce various reasonable strokes varying in shapes and complexity. DrawFromDrawings allows users to either replace their strokes with interpolated strokes (deformation feedback) or overlays interpolated strokes onto their strokes (suggestive feedback). The other user studies on the feedback modes indicated that the suggestive feedback enabled drawers to develop and render their ideas using their own stroke style, whereas the deformation feedback enabled them to finish the sketch composition quickly.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present DrawFromDrawings, an interactive drawing system that provides users with visual feedback for assistance in 2D drawing using a database of sketch images. 
Following the traditional imitation and emulation training from art education, DrawFromDrawings enables users to retrieve and refer to a sketch image stored in a database and provides them with various novel strokes as suggestive or deformation feedback. Given regions of interest (ROIs) in the user and reference sketches, DrawFromDrawings detects as-long-as-possible (ALAP) stroke segments and the correspondences between user and reference sketches that are the key to computing seamless interpolations. The stroke-level interpolations are parametrized with the user strokes, the reference strokes, and new strokes created by warping the reference strokes based on the user and reference ROI shapes, and the user study indicated that the interpolation could produce various reasonable strokes varying in shapes and complexity. DrawFromDrawings allows users to either replace their strokes with interpolated strokes (deformation feedback) or overlays interpolated strokes onto their strokes (suggestive feedback). 
The other user studies on the feedback modes indicated that the suggestive feedback enabled drawers to develop and render their ideas using their own stroke style, whereas the deformation feedback enabled them to finish the sketch composition quickly.", "title": "DrawFromDrawings: 2D Drawing Assistance via Stroke Interpolation with a Sketch Database", "normalizedTitle": "DrawFromDrawings: 2D Drawing Assistance via Stroke Interpolation with a Sketch Database", "fno": "07452668", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Interpolation", "Shape", "Feature Extraction", "Animation", "Visual Databases", "Visualization", "Interactive Drawing", "2 D Shape Interpolation" ], "authors": [ { "givenName": "Yusuke", "surname": "Matsui", "fullName": "Yusuke Matsui", "affiliation": "Department of Information and Communication Engineering, The University of Tokyo, Tokyo, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Takaaki", "surname": "Shiratori", "fullName": "Takaaki Shiratori", "affiliation": "Oculus Research Pittsburgh, Facebook Inc, Pittsburgh, PA", "__typename": "ArticleAuthorType" }, { "givenName": "Kiyoharu", "surname": "Aizawa", "fullName": "Kiyoharu Aizawa", "affiliation": "Department of Information and Communication Engineering, The University of Tokyo, Tokyo, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1852-1862", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icassp/1993/0946/1/00319201", "title": "A new stroke string matching algorithm for stroke-based on-line character recognition", "doi": null, "abstractUrl": "/proceedings-article/icassp/1993/00319201/12OmNCdBDIr", "parentPublication": { "id": "proceedings/icassp/1993/0946/1", "title": "Acoustics, 
Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2017/0560/0/08026301", "title": "Automatic genaration of sketch-like pencil drawing from image", "doi": null, "abstractUrl": "/proceedings-article/icmew/2017/08026301/12OmNqBtj6y", "parentPublication": { "id": "proceedings/icmew/2017/0560/0", "title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfhr/2014/4335/0/06981006", "title": "Online Handwritten Stroke Type Determination Using Descriptors Based on Spatially and Temporally Neighboring Strokes", "doi": null, "abstractUrl": "/proceedings-article/icfhr/2014/06981006/12OmNvAAtJn", "parentPublication": { "id": "proceedings/icfhr/2014/4335/0", "title": "2014 14th International Conference on Frontiers in Handwriting Recognition (ICFHR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2015/1805/0/07333715", "title": "A Polar Stroke Descriptor for classification of historical documents", "doi": null, "abstractUrl": "/proceedings-article/icdar/2015/07333715/12OmNxGALiC", "parentPublication": { "id": "proceedings/icdar/2015/1805/0", "title": "2015 13th International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/1995/7128/1/71280179", "title": "Stroke-based time warping for signature verification", "doi": null, "abstractUrl": "/proceedings-article/icdar/1995/71280179/12OmNy7h3bE", "parentPublication": { "id": "proceedings/icdar/1995/7128/1", "title": "Proceedings of 3rd International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icmip/2017/5954/0/5954a068", "title": "Stroke Extraction of Handwritten Chinese Character Based on Ambiguous Zone Information", "doi": null, "abstractUrl": "/proceedings-article/icmip/2017/5954a068/12OmNzmLxQL", "parentPublication": { "id": "proceedings/icmip/2017/5954/0", "title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1992/2915/0/00201752", "title": "A probabilistic stroke-based Viterbi algorithm for handwritten Chinese characters recognition", "doi": null, "abstractUrl": "/proceedings-article/icpr/1992/00201752/12OmNzvQHMY", "parentPublication": { "id": "proceedings/icpr/1992/2915/0", "title": "11th IAPR International Conference on Pattern Recognition. Vol.II. Conference B: Pattern Recognition Methodology and Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/02/07831370", "title": "Context-Aware Computer Aided Inbetweening", "doi": null, "abstractUrl": "/journal/tg/2018/02/07831370/13rRUxNEqQ1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000i014", "title": "Learning Deep Sketch Abstraction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000i014/17D45WLdYQJ", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900f127", "title": "SSR-GNNs: Stroke-based Sketch Representation with Graph Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900f127/1G56vex6Ni0", "parentPublication": { "id": 
"proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07460953", "articleId": "13rRUILc8ff", "__typename": "AdjacentArticleType" }, "next": { "fno": "07445239", "articleId": "13rRUNvgz4m", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXnFq4", "name": "ttg201707-07452668s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07452668s1.zip", "extension": "zip", "size": "39.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvgz4m", "doi": "10.1109/TVCG.2016.2549018", "abstract": "We systematically reviewed 64 user-study papers on data glyphs to help researchers and practitioners gain an informed understanding of tradeoffs in the glyph design space. The glyphs we consider are individual representations of multi-dimensional data points, often meant to be shown in small-multiple settings. Over the past 60 years many different glyph designs were proposed and many of these designs have been subjected to perceptual or comparative evaluations. Yet, a systematic overview of the types of glyphs and design variations tested, the tasks under which they were analyzed, or even the study goals and results does not yet exist. In this paper we provide such an overview by systematically sampling and tabulating the literature on data glyph studies, listing their designs, questions, data, and tasks. In addition we present a concise overview of the types of glyphs and their design characteristics analyzed by researchers in the past, and a synthesis of the study results. Based on our meta analysis of all results we further contribute a set of design implications and a discussion on open research directions.", "abstracts": [ { "abstractType": "Regular", "content": "We systematically reviewed 64 user-study papers on data glyphs to help researchers and practitioners gain an informed understanding of tradeoffs in the glyph design space. The glyphs we consider are individual representations of multi-dimensional data points, often meant to be shown in small-multiple settings. 
Over the past 60 years many different glyph designs were proposed and many of these designs have been subjected to perceptual or comparative evaluations. Yet, a systematic overview of the types of glyphs and design variations tested, the tasks under which they were analyzed, or even the study goals and results does not yet exist. In this paper we provide such an overview by systematically sampling and tabulating the literature on data glyph studies, listing their designs, questions, data, and tasks. In addition we present a concise overview of the types of glyphs and their design characteristics analyzed by researchers in the past, and a synthesis of the study results. Based on our meta analysis of all results we further contribute a set of design implications and a discussion on open research directions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We systematically reviewed 64 user-study papers on data glyphs to help researchers and practitioners gain an informed understanding of tradeoffs in the glyph design space. The glyphs we consider are individual representations of multi-dimensional data points, often meant to be shown in small-multiple settings. Over the past 60 years many different glyph designs were proposed and many of these designs have been subjected to perceptual or comparative evaluations. Yet, a systematic overview of the types of glyphs and design variations tested, the tasks under which they were analyzed, or even the study goals and results does not yet exist. In this paper we provide such an overview by systematically sampling and tabulating the literature on data glyph studies, listing their designs, questions, data, and tasks. In addition we present a concise overview of the types of glyphs and their design characteristics analyzed by researchers in the past, and a synthesis of the study results. 
Based on our meta analysis of all results we further contribute a set of design implications and a discussion on open research directions.", "title": "A Systematic Review of Experimental Studies on Data Glyphs", "normalizedTitle": "A Systematic Review of Experimental Studies on Data Glyphs", "fno": "07445239", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Data Visualization", "Systematics", "Layout", "Encoding", "Guidelines", "Survey", "Glyphs", "Quantitative Evaluation", "Glyph Design" ], "authors": [ { "givenName": "Johannes", "surname": "Fuchs", "fullName": "Johannes Fuchs", "affiliation": "University of Konstanz, Konstanz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Petra", "surname": "Isenberg", "fullName": "Petra Isenberg", "affiliation": "Inria, Paris, France", "__typename": "ArticleAuthorType" }, { "givenName": "Anastasia", "surname": "Bezerianos", "fullName": "Anastasia Bezerianos", "affiliation": "Univ Paris Sud, CNRS & Inria, Paris, France", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Keim", "fullName": "Daniel Keim", "affiliation": "University of Konstanz, Konstanz, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1863-1879", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2014/12/06875973", "title": "The Influence of Contour on Similarity Perception of Star Glyphs", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875973/13rRUwhHcQV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1996/03/v0266", "title": "Glyphs for Visualizing Uncertainty 
in Vector Fields", "doi": null, "abstractUrl": "/journal/tg/1996/03/v0266/13rRUxly8SN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/08/ttg2013081331", "title": "Representing Flow Patterns by Using Streamlines with Glyphs", "doi": null, "abstractUrl": "/journal/tg/2013/08/ttg2013081331/13rRUxly9dT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2018/7202/0/720200a058", "title": "Visualizing Multidimensional Data in Treemaps with Adaptive Glyphs", "doi": null, "abstractUrl": "/proceedings-article/iv/2018/720200a058/17D45XeKgvR", "parentPublication": { "id": "proceedings/iv/2018/7202/0", "title": "2018 22nd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09930144", "title": "Out of the Plane: Flower Vs. 
Star Glyphs to Support High-Dimensional Exploration in Two-Dimensional Embeddings", "doi": null, "abstractUrl": "/journal/tg/5555/01/09930144/1HMOX2J2VMY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a157", "title": "Evaluation of Effectiveness of Glyphs to Enhance ChronoView", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a157/1cMF9mvWMFO", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09067088", "title": "AgentVis: Visual Analysis of Agent Behavior With Hierarchical Glyphs", "doi": null, "abstractUrl": "/journal/tg/2021/09/09067088/1j1lyTz50k0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a242", "title": "A summarization glyph for sets of unreadable visual items in treemaps", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a242/1rSRaQV3b3y", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552906", "title": "Generative Design Inspiration for Glyphs with Diatoms", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552906/1xic46x3fmU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09557223", "title": "GlyphCreator: 
Towards Example-based Automatic Generation of Circular Glyphs", "doi": null, "abstractUrl": "/journal/tg/2022/01/09557223/1xlvZajdjmo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07452668", "articleId": "13rRUwbJD4Q", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqESuig", "title": "May/June", "year": "2010", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0gev3", "doi": "10.1109/TVCG.2010.51", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "title": "Guest Editors' Introduction: Special Section on The International Symposium on Mixed and Augmented Reality (ISMAR)", "normalizedTitle": "Guest Editors' Introduction: Special Section on The International Symposium on Mixed and Augmented Reality (ISMAR)", "fno": "ttg2010030353", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Mark A.", "surname": "Livingston", "fullName": "Mark A. Livingston", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Ronald T.", "surname": "Azuma", "fullName": "Ronald T. 
Azuma", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Oliver", "surname": "Bimber", "fullName": "Oliver Bimber", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Hideo", "surname": "Saito", "fullName": "Hideo Saito", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "03", "pubDate": "2010-05-01 00:00:00", "pubType": "trans", "pages": "353-354", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2016/07/07478595", "title": "Guest Editors’ Introduction: Special Section on the IEEE Pacific Visualization Symposium 2015", "doi": null, "abstractUrl": "/journal/tg/2016/07/07478595/13rRUEgarsL", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/06/ttg2013060898", "title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium 2012", "doi": null, "abstractUrl": "/journal/tg/2013/06/ttg2013060898/13rRUNvgziD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2012/08/ttc2012081057", "title": "Guest Editors' Introduction: Special Section on Computer Arithmetic", "doi": null, "abstractUrl": "/journal/tc/2012/08/ttc2012081057/13rRUNvyajT", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/08/07138667", "title": "Guest Editors’ Introduction: Special Section on the IEEE Pacific Visualization Symposium 2014", "doi": null, 
"abstractUrl": "/journal/tg/2015/08/07138667/13rRUwI5Ugf", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/10/ttg2012101589", "title": "Guest Editors' Introduction: Special Section on the Symposium on Interactive 3D Graphics and Games (I3D)", "doi": null, "abstractUrl": "/journal/tg/2012/10/ttg2012101589/13rRUwdIOUI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/10/ttg2011101353", "title": "Guest Editors' Introduction: Special Section on the IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "doi": null, "abstractUrl": "/journal/tg/2011/10/ttg2011101353/13rRUx0xPZx", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/08/06847259", "title": "Guest Editors&#x0027; Introduction: Special Section on the IEEE Pacific Visualization Symposium", "doi": null, "abstractUrl": "/journal/tg/2014/08/06847259/13rRUxD9gXJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/10/06881790", "title": "Guest Editors' Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)", "doi": null, "abstractUrl": "/journal/tg/2014/10/06881790/13rRUy0HYRq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2011/02/ttc2011020145", "title": "Guest 
Editors' Introduction: Special Section on Computer Arithmetic", "doi": null, "abstractUrl": "/journal/tc/2011/02/ttc2011020145/13rRUyuvRwz", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "ttg2010030355", "articleId": "13rRUwIF69g", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqESuig", "title": "May/June", "year": "2010", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwIF69g", "doi": "10.1109/TVCG.2009.99", "abstract": "In this paper, we present three techniques for 6DOF natural feature tracking in real time on mobile phones. We achieve interactive frame rates of up to 30 Hz for natural feature tracking from textured planar targets on current generation phones. We use an approach based on heavily modified state-of-the-art feature descriptors, namely SIFT and Ferns plus a template-matching-based tracker. While SIFT is known to be a strong, but computationally expensive feature descriptor, Ferns classification is fast, but requires large amounts of memory. This renders both original designs unsuitable for mobile phones. We give detailed descriptions on how we modified both approaches to make them suitable for mobile phones. The template-based tracker further increases the performance and robustness of the SIFT- and Ferns-based approaches. We present evaluations on robustness and performance and discuss their appropriateness for Augmented Reality applications.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present three techniques for 6DOF natural feature tracking in real time on mobile phones. We achieve interactive frame rates of up to 30 Hz for natural feature tracking from textured planar targets on current generation phones. We use an approach based on heavily modified state-of-the-art feature descriptors, namely SIFT and Ferns plus a template-matching-based tracker. While SIFT is known to be a strong, but computationally expensive feature descriptor, Ferns classification is fast, but requires large amounts of memory. This renders both original designs unsuitable for mobile phones. 
We give detailed descriptions on how we modified both approaches to make them suitable for mobile phones. The template-based tracker further increases the performance and robustness of the SIFT- and Ferns-based approaches. We present evaluations on robustness and performance and discuss their appropriateness for Augmented Reality applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present three techniques for 6DOF natural feature tracking in real time on mobile phones. We achieve interactive frame rates of up to 30 Hz for natural feature tracking from textured planar targets on current generation phones. We use an approach based on heavily modified state-of-the-art feature descriptors, namely SIFT and Ferns plus a template-matching-based tracker. While SIFT is known to be a strong, but computationally expensive feature descriptor, Ferns classification is fast, but requires large amounts of memory. This renders both original designs unsuitable for mobile phones. We give detailed descriptions on how we modified both approaches to make them suitable for mobile phones. The template-based tracker further increases the performance and robustness of the SIFT- and Ferns-based approaches. 
We present evaluations on robustness and performance and discuss their appropriateness for Augmented Reality applications.", "title": "Real-Time Detection and Tracking for Augmented Reality on Mobile Phones", "normalizedTitle": "Real-Time Detection and Tracking for Augmented Reality on Mobile Phones", "fno": "ttg2010030355", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Information Interfaces And Presentation", "Multimedia Information Systems", "Artificial", "Augmented", "And Virtual Realities", "Image Processing And Computer Vision", "Scene Analysis", "Tracking" ], "authors": [ { "givenName": "Daniel", "surname": "Wagner", "fullName": "Daniel Wagner", "affiliation": "Graz University of Technology, Graz", "__typename": "ArticleAuthorType" }, { "givenName": "Gerhard", "surname": "Reitmayr", "fullName": "Gerhard Reitmayr", "affiliation": "Cambridge University, Cambridge", "__typename": "ArticleAuthorType" }, { "givenName": "Alessandro", "surname": "Mulloni", "fullName": "Alessandro Mulloni", "affiliation": "Graz University of Technology, Graz", "__typename": "ArticleAuthorType" }, { "givenName": "Tom", "surname": "Drummond", "fullName": "Tom Drummond", "affiliation": "University of Cambridge, Cambridge", "__typename": "ArticleAuthorType" }, { "givenName": "Dieter", "surname": "Schmalstieg", "fullName": "Dieter Schmalstieg", "affiliation": "Graz University of Technology, Graz", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2010-05-01 00:00:00", "pubType": "trans", "pages": "355-368", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2012/4660/0/06402569", "title": "Superman-like X-ray vision: Towards brain-computer interfaces for medical augmented reality", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2012/06402569/12OmNAoUT3L", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402542", "title": "Optical outside-in tracking using unmodified mobile phones", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402542/12OmNqNXEoZ", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402555", "title": "Image-driven view management for augmented reality browsers", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402555/12OmNvoFjTz", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2008/2840/0/04637338", "title": "Pose tracking from natural features on mobile phones", "doi": null, "abstractUrl": "/proceedings-article/ismar/2008/04637338/12OmNvpw7gU", "parentPublication": { "id": "proceedings/ismar/2008/2840/0", "title": "2008 7th IEEE/ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2015/8688/0/8688a136", "title": "Real-Time Tracking with Selective DoP-RIEF Features for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2015/8688a136/12OmNvxsSSW", "parentPublication": { "id": "proceedings/bigmm/2015/8688/0", "title": "2015 IEEE International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543249", "title": "Location-based augmented reality on mobile phones", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543249/12OmNzTppBX", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2015/6683/0/6683a041", "title": "Multi-person Tracking Based on Body Parts and Online Random Ferns Learning of Thermal Images", "doi": null, "abstractUrl": "/proceedings-article/wacv/2015/6683a041/12OmNzwZ6l2", "parentPublication": { "id": "proceedings/wacv/2015/6683/0", "title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2009/02/mpc2009020008", "title": "What Wearable Augmented Reality Can Do for You", "doi": null, "abstractUrl": "/magazine/pc/2009/02/mpc2009020008/13rRUEgaryp", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/03/ttg2009030355", "title": "Multithreaded Hybrid Feature Tracking for Markerless Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2009/03/ttg2009030355/13rRUwdrdSt", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/10/ttg2011101369", "title": "Shape Recognition and Pose Estimation for Mobile Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2011/10/ttg2011101369/13rRUyft7D0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2010030353", "articleId": "13rRUx0gev3", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2010030369", "articleId": "13rRUxjQybP", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqESuig", "title": "May/June", "year": "2010", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxjQybP", "doi": "10.1109/TVCG.2009.210", "abstract": "Video see-through Augmented Reality adds computer graphics to the real world in real time by overlaying graphics onto a live video feed. To achieve a realistic integration of the virtual and real imagery, the rendered images should have a similar appearance and quality to those produced by the video camera. This paper describes a compositing method which models the artifacts produced by a small low-cost camera, and adds these effects to an ideal pinhole image produced by conventional rendering methods. We attempt to model and simulate each step of the imaging process, including distortions, chromatic aberrations, blur, Bayer masking, noise, sharpening, and color-space compression, all while requiring only an RGBA image and an estimate of camera velocity as inputs.", "abstracts": [ { "abstractType": "Regular", "content": "Video see-through Augmented Reality adds computer graphics to the real world in real time by overlaying graphics onto a live video feed. To achieve a realistic integration of the virtual and real imagery, the rendered images should have a similar appearance and quality to those produced by the video camera. This paper describes a compositing method which models the artifacts produced by a small low-cost camera, and adds these effects to an ideal pinhole image produced by conventional rendering methods. 
We attempt to model and simulate each step of the imaging process, including distortions, chromatic aberrations, blur, Bayer masking, noise, sharpening, and color-space compression, all while requiring only an RGBA image and an estimate of camera velocity as inputs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Video see-through Augmented Reality adds computer graphics to the real world in real time by overlaying graphics onto a live video feed. To achieve a realistic integration of the virtual and real imagery, the rendered images should have a similar appearance and quality to those produced by the video camera. This paper describes a compositing method which models the artifacts produced by a small low-cost camera, and adds these effects to an ideal pinhole image produced by conventional rendering methods. We attempt to model and simulate each step of the imaging process, including distortions, chromatic aberrations, blur, Bayer masking, noise, sharpening, and color-space compression, all while requiring only an RGBA image and an estimate of camera velocity as inputs.", "title": "Simulating Low-Cost Cameras for Augmented Reality Compositing", "normalizedTitle": "Simulating Low-Cost Cameras for Augmented Reality Compositing", "fno": "ttg2010030369", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Artificial", "Augmented", "And Virtual Realities", "Visualization", "Compositing" ], "authors": [ { "givenName": "Georg", "surname": "Klein", "fullName": "Georg Klein", "affiliation": "University of Oxford, Oxford", "__typename": "ArticleAuthorType" }, { "givenName": "David W.", "surname": "Murray", "fullName": "David W. 
Murray", "affiliation": "University of Oxford, Oxford", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2010-05-01 00:00:00", "pubType": "trans", "pages": "369-380", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2008/2840/0/04637324", "title": "Compositing for small cameras", "doi": null, "abstractUrl": "/proceedings-article/ismar/2008/04637324/12OmNAObbKT", "parentPublication": { "id": "proceedings/ismar/2008/2840/0", "title": "2008 7th IEEE/ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2002/1781/0/17810281", "title": "Testable Design Representations for Mobile Augmented Reality Authoring", "doi": null, "abstractUrl": "/proceedings-article/ismar/2002/17810281/12OmNwJgANw", "parentPublication": { "id": "proceedings/ismar/2002/1781/0", "title": "Proceedings. 
International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2005/8929/0/01492774", "title": "Stylized augmented reality for improved immersion", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492774/12OmNylbotS", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2009/02/mpc2009020008", "title": "What Wearable Augmented Reality Can Do for You", "doi": null, "abstractUrl": "/magazine/pc/2009/02/mpc2009020008/13rRUEgaryp", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2005/06/mcg2005060048", "title": "Augmented Reality Projects in the Automotive and Aerospace Industries", "doi": null, "abstractUrl": "/magazine/cg/2005/06/mcg2005060048/13rRUwghdbu", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/02/ttg2009020193", "title": "Comprehensible Visualization for Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2009/02/ttg2009020193/13rRUxASuGc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2000/04/v0346", "title": "Calibration-Free Augmented Reality in Perspective", "doi": null, "abstractUrl": "/journal/tg/2000/04/v0346/13rRUxOdD2s", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030513", "title": "Usability 
Engineering for Augmented Reality: Employing User-Based Studies to Inform Design", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030513/13rRUy3gn7q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/10/ttg2011101369", "title": "Shape Recognition and Pose Estimation for Mobile Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2011/10/ttg2011101369/13rRUyft7D0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a682", "title": "CatARact: Simulating Cataracts in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a682/1pysul75Wc8", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2010030355", "articleId": "13rRUwIF69g", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2010030381", "articleId": "13rRUyYSWsN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqESuig", "title": "May/June", "year": "2010", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyYSWsN", "doi": "10.1109/TVCG.2009.95", "abstract": "We present the design and implementation of an optical see-through head-mounted display (HMD) with addressable focus cues utilizing a liquid lens. We implemented a monocular bench prototype capable of addressing the focal distance of the display from infinity to as close as 8 diopters. Two operation modes of the system were demonstrated: a vari-focal plane mode in which the accommodation cue is addressable, and a time-multiplexed multi-focal plane mode in which both the accommodation and retinal blur cues can be rendered. We further performed experiments to assess the depth perception and eye accommodative response of the system operated in a vari-focal plane mode. Both subjective and objective measurements suggest that the perceived depths and accommodative responses of the user match with the rendered depths of the virtual display with addressable accommodation cues, approximating the real-world 3-D viewing condition.", "abstracts": [ { "abstractType": "Regular", "content": "We present the design and implementation of an optical see-through head-mounted display (HMD) with addressable focus cues utilizing a liquid lens. We implemented a monocular bench prototype capable of addressing the focal distance of the display from infinity to as close as 8 diopters. Two operation modes of the system were demonstrated: a vari-focal plane mode in which the accommodation cue is addressable, and a time-multiplexed multi-focal plane mode in which both the accommodation and retinal blur cues can be rendered. 
We further performed experiments to assess the depth perception and eye accommodative response of the system operated in a vari-focal plane mode. Both subjective and objective measurements suggest that the perceived depths and accommodative responses of the user match with the rendered depths of the virtual display with addressable accommodation cues, approximating the real-world 3-D viewing condition.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present the design and implementation of an optical see-through head-mounted display (HMD) with addressable focus cues utilizing a liquid lens. We implemented a monocular bench prototype capable of addressing the focal distance of the display from infinity to as close as 8 diopters. Two operation modes of the system were demonstrated: a vari-focal plane mode in which the accommodation cue is addressable, and a time-multiplexed multi-focal plane mode in which both the accommodation and retinal blur cues can be rendered. We further performed experiments to assess the depth perception and eye accommodative response of the system operated in a vari-focal plane mode. 
Both subjective and objective measurements suggest that the perceived depths and accommodative responses of the user match with the rendered depths of the virtual display with addressable accommodation cues, approximating the real-world 3-D viewing condition.", "title": "A Novel Prototype for an Optical See-Through Head-Mounted Display with Addressable Focus Cues", "normalizedTitle": "A Novel Prototype for an Optical See-Through Head-Mounted Display with Addressable Focus Cues", "fno": "ttg2010030381", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Three Dimensional Displays", "Mixed And Augmented Reality", "Focus Cues", "Accommodation", "Retinal Blur", "Convergence", "User Studies" ], "authors": [ { "givenName": "Sheng", "surname": "Liu", "fullName": "Sheng Liu", "affiliation": "University of Arizona, Tucson", "__typename": "ArticleAuthorType" }, { "givenName": "Hong", "surname": "Hua", "fullName": "Hong Hua", "affiliation": "University of Arizona, Tucson", "__typename": "ArticleAuthorType" }, { "givenName": "Dewen", "surname": "Cheng", "fullName": "Dewen Cheng", "affiliation": "University of Arizona, Tucson and Beijing Institute of Technology, Beijing", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2010-05-01 00:00:00", "pubType": "trans", "pages": "381-393", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iscv/1995/7190/0/71900329", "title": "Range segmentation using focus cues", "doi": null, "abstractUrl": "/proceedings-article/iscv/1995/71900329/12OmNCmpcTg", "parentPublication": { "id": "proceedings/iscv/1995/7190/0", "title": "Computer Vision, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2008/2840/0/04637321", "title": "An optical 
see-through head mounted display with addressable focal planes", "doi": null, "abstractUrl": "/proceedings-article/ismar/2008/04637321/12OmNwe2IAw", "parentPublication": { "id": "proceedings/ismar/2008/2840/0", "title": "2008 7th IEEE/ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/01/ttp2011010101", "title": "Multiperson Visual Focus of Attention from Head Pose and Meeting Contextual Cues", "doi": null, "abstractUrl": "/journal/tp/2011/01/ttp2011010101/13rRUwcAqrk", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1995/12/i1213", "title": "Performance Analysis of Stereo, Vergence, and Focus as Depth Cues for Active Vision", "doi": null, "abstractUrl": "/journal/tp/1995/12/i1213/13rRUwcS1DZ", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714124", "title": "Video See-Through Mixed Reality with Focus Cues", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714124/1B0XWyWo5KE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2010030369", "articleId": "13rRUxjQybP", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2010030394", "articleId": "13rRUypp57y", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesZ3", "name": "ttg2010030381s.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2010030381s.zip", "extension": "zip", "size": "9.37 kB", "__typename": 
"WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNqESuig", "title": "May/June", "year": "2010", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUypp57y", "doi": "10.1109/TVCG.2009.89", "abstract": "We present and evaluate a new approach for real-time rendering of composable 3D lenses for polygonal scenes. Such lenses, usually called “volumetric lenses,” are an extension of 2D Magic Lenses to 3D volumes in which effects are applied to scene elements. Although the composition of 2D lenses is well known, 3D composition was long considered infeasible due to both geometric and semantic complexity. Nonetheless, for a scene with multiple interactive 3D lenses, the problem of intersecting lenses must be considered. Intersecting 3D lenses in meaningful ways supports new interfaces such as hierarchical 3D windows, 3D lenses for managing and composing visualization options, or interactive shader development by direct manipulation of lenses providing component effects. Our 3D volumetric lens approach differs from other approaches and is one of the first to address efficient composition of multiple lenses. It is well-suited to head-tracked VR environments because it requires no view-dependent generation of major data structures, allowing caching and reuse of full or partial results. A Composite Shader Factory module composes shader programs for rendering composite visual styles and geometry of intersection regions. Geometry is handled by Boolean combinations of region tests in fragment shaders, which allows both convex and nonconvex CSG volumes for lens shape. Efficiency is further addressed by a Region Analyzer module and by broad-phase culling. 
Finally, we consider the handling of order effects for composed 3D lenses.", "abstracts": [ { "abstractType": "Regular", "content": "We present and evaluate a new approach for real-time rendering of composable 3D lenses for polygonal scenes. Such lenses, usually called “volumetric lenses,” are an extension of 2D Magic Lenses to 3D volumes in which effects are applied to scene elements. Although the composition of 2D lenses is well known, 3D composition was long considered infeasible due to both geometric and semantic complexity. Nonetheless, for a scene with multiple interactive 3D lenses, the problem of intersecting lenses must be considered. Intersecting 3D lenses in meaningful ways supports new interfaces such as hierarchical 3D windows, 3D lenses for managing and composing visualization options, or interactive shader development by direct manipulation of lenses providing component effects. Our 3D volumetric lens approach differs from other approaches and is one of the first to address efficient composition of multiple lenses. It is well-suited to head-tracked VR environments because it requires no view-dependent generation of major data structures, allowing caching and reuse of full or partial results. A Composite Shader Factory module composes shader programs for rendering composite visual styles and geometry of intersection regions. Geometry is handled by Boolean combinations of region tests in fragment shaders, which allows both convex and nonconvex CSG volumes for lens shape. Efficiency is further addressed by a Region Analyzer module and by broad-phase culling. Finally, we consider the handling of order effects for composed 3D lenses.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present and evaluate a new approach for real-time rendering of composable 3D lenses for polygonal scenes. Such lenses, usually called “volumetric lenses,” are an extension of 2D Magic Lenses to 3D volumes in which effects are applied to scene elements. 
Although the composition of 2D lenses is well known, 3D composition was long considered infeasible due to both geometric and semantic complexity. Nonetheless, for a scene with multiple interactive 3D lenses, the problem of intersecting lenses must be considered. Intersecting 3D lenses in meaningful ways supports new interfaces such as hierarchical 3D windows, 3D lenses for managing and composing visualization options, or interactive shader development by direct manipulation of lenses providing component effects. Our 3D volumetric lens approach differs from other approaches and is one of the first to address efficient composition of multiple lenses. It is well-suited to head-tracked VR environments because it requires no view-dependent generation of major data structures, allowing caching and reuse of full or partial results. A Composite Shader Factory module composes shader programs for rendering composite visual styles and geometry of intersection regions. Geometry is handled by Boolean combinations of region tests in fragment shaders, which allows both convex and nonconvex CSG volumes for lens shape. Efficiency is further addressed by a Region Analyzer module and by broad-phase culling. Finally, we consider the handling of order effects for composed 3D lenses.", "title": "Real-Time Rendering Method and Performance Evaluation of Composable 3D Lenses for Interactive VR", "normalizedTitle": "Real-Time Rendering Method and Performance Evaluation of Composable 3D Lenses for Interactive VR", "fno": "ttg2010030394", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Interaction Styles", "Virtual Reality", "Volumetric Lens", "Windowing Systems" ], "authors": [ { "givenName": "Christoph W.", "surname": "Borst", "fullName": "Christoph W. 
Borst", "affiliation": "University of Louisiana at Lafayette, Lafayette", "__typename": "ArticleAuthorType" }, { "givenName": "Jan-Phillip", "surname": "Tiesel", "fullName": "Jan-Phillip Tiesel", "affiliation": "University of Louisiana at Lafayette, Lafayette", "__typename": "ArticleAuthorType" }, { "givenName": "Christopher M.", "surname": "Best", "fullName": "Christopher M. Best", "affiliation": "Willian J. Hughes Technical Center, Atlantic City International Airport", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2010-05-01 00:00:00", "pubType": "trans", "pages": "394-410", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/1998/9176/0/91760305", "title": "Real-Time Techniques for 3D Flow Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1998/91760305/12OmNAZOJWZ", "parentPublication": { "id": "proceedings/ieee-vis/1998/9176/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2008/1971/0/04480772", "title": "New Rendering Approach for Composable Volumetric Lenses", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480772/12OmNBAqZId", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444782", "title": "Single-pass 3D lens rendering and spatiotemporal \"Time Warp\" example", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444782/12OmNBO3JYm", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/iv/2013/5049/0/5049a539", "title": "Comparison of Advanced and Standard Real-Time 3D Rendering Methods for Interactive Landscapes (Short Paper Version)", "doi": null, "abstractUrl": "/proceedings-article/iv/2013/5049a539/12OmNwNwzIB", "parentPublication": { "id": "proceedings/iv/2013/5049/0", "title": "2013 17th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2008/3268/0/3268a356", "title": "3D Generalization Lenses for Interactive Focus + Context Visualization of Virtual City Models", "doi": null, "abstractUrl": "/proceedings-article/iv/2008/3268a356/12OmNzaQoEB", "parentPublication": { "id": "proceedings/iv/2008/3268/0", "title": "2008 12th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660047", "title": "The Magic Volume Lens: An Interactive Focus+Context Technique for Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660047/12OmNzmLxM5", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/03/ttg2010030455", "title": "Representation-Independent In-Place Magnification with Sigma Lenses", "doi": null, "abstractUrl": "/journal/tg/2010/03/ttg2010030455/13rRUwInvf1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/06/ttg2009061587", "title": "GL4D: A GPU-based Architecture for Interactive 4D Visualization", "doi": null, "abstractUrl": "/journal/tg/2009/06/ttg2009061587/13rRUwjoNwY", "parentPublication": { "id": "trans/tg", "title": 
"IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/09/ttg2011091259", "title": "Single-Pass Composable 3D Lens Rendering and Spatiotemporal 3D Lenses", "doi": null, "abstractUrl": "/journal/tg/2011/09/ttg2011091259/13rRUwkfAZe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2018/6882/0/08823618", "title": "3De Interactive Lenses for Visualization in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/scivis/2018/08823618/1d5kwZvgfNm", "parentPublication": { "id": "proceedings/scivis/2018/6882/0", "title": "2018 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2010030381", "articleId": "13rRUyYSWsN", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2010030407", "articleId": "13rRUxYINf5", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqESuig", "title": "May/June", "year": "2010", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxYINf5", "doi": "10.1109/TVCG.2009.88", "abstract": "This paper presents a robust multiview stereo (MVS) algorithm for free-viewpoint video. Our MVS scheme is totally point-cloud-based and consists of three stages: point cloud extraction, merging, and meshing. To guarantee reconstruction accuracy, point clouds are first extracted according to a stereo matching metric which is robust to noise, occlusion, and lack of texture. Visual hull information, frontier points, and implicit points are then detected and fused with point fidelity information in the merging and meshing steps. All aspects of our method are designed to counteract potential challenges in MVS data sets for accurate and complete model reconstruction. Experimental results demonstrate that our technique produces the most competitive performance among current algorithms under sparse viewpoint setups according to both static and motion MVS data sets.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a robust multiview stereo (MVS) algorithm for free-viewpoint video. Our MVS scheme is totally point-cloud-based and consists of three stages: point cloud extraction, merging, and meshing. To guarantee reconstruction accuracy, point clouds are first extracted according to a stereo matching metric which is robust to noise, occlusion, and lack of texture. Visual hull information, frontier points, and implicit points are then detected and fused with point fidelity information in the merging and meshing steps. All aspects of our method are designed to counteract potential challenges in MVS data sets for accurate and complete model reconstruction. 
Experimental results demonstrate that our technique produces the most competitive performance among current algorithms under sparse viewpoint setups according to both static and motion MVS data sets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a robust multiview stereo (MVS) algorithm for free-viewpoint video. Our MVS scheme is totally point-cloud-based and consists of three stages: point cloud extraction, merging, and meshing. To guarantee reconstruction accuracy, point clouds are first extracted according to a stereo matching metric which is robust to noise, occlusion, and lack of texture. Visual hull information, frontier points, and implicit points are then detected and fused with point fidelity information in the merging and meshing steps. All aspects of our method are designed to counteract potential challenges in MVS data sets for accurate and complete model reconstruction. Experimental results demonstrate that our technique produces the most competitive performance among current algorithms under sparse viewpoint setups according to both static and motion MVS data sets.", "title": "A Point-Cloud-Based Multiview Stereo Algorithm for Free-Viewpoint Video", "normalizedTitle": "A Point-Cloud-Based Multiview Stereo Algorithm for Free-Viewpoint Video", "fno": "ttg2010030407", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Multiview Stereo", "MVS", "Free Viewpoint Video", "Point Cloud" ], "authors": [ { "givenName": "Yebin", "surname": "Liu", "fullName": "Yebin Liu", "affiliation": "Tsinghua University, Beijing", "__typename": "ArticleAuthorType" }, { "givenName": "Qionghai", "surname": "Dai", "fullName": "Qionghai Dai", "affiliation": "Tsinghua University, Beijing", "__typename": "ArticleAuthorType" }, { "givenName": "Wenli", "surname": "Xu", "fullName": "Wenli Xu", "affiliation": "Tsinghua University, Beijing", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": 
true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2010-05-01 00:00:00", "pubType": "trans", "pages": "407-418", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iih-msp/2008/3278/0/3278a438", "title": "A Multi-view Video Coding Method Based on Distributed Source Coding for Free Viewpoint Switching", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2008/3278a438/12OmNAhxjBm", "parentPublication": { "id": "proceedings/iih-msp/2008/3278/0", "title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2006/2606/0/26060220", "title": "Synthesizing Free-Viewpoint Images from Multiple View Videos in Soccer Stadium", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2006/26060220/12OmNB9bvjn", "parentPublication": { "id": "proceedings/cgiv/2006/2606/0", "title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ngmast/2009/3786/0/3786a298", "title": "Minimizing Uplink Data in Wireless Free-Viewpoint Video Transmission Applications", "doi": null, "abstractUrl": "/proceedings-article/ngmast/2009/3786a298/12OmNqGitWa", "parentPublication": { "id": "proceedings/ngmast/2009/3786/0", "title": "Next Generation Mobile Applications, Services and Technologies, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/uksim/2012/4682/0/4682a165", "title": "Feel3D: Free Viewpoint Video Recording Mechanism for Premeditated Scenarios, Using Pre-built Models", "doi": null, "abstractUrl": "/proceedings-article/uksim/2012/4682a165/12OmNqI04TB", "parentPublication": { "id": 
"proceedings/uksim/2012/4682/0", "title": "Computer Modeling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isise/2010/4360/0/4360a117", "title": "A Multiresolution Viewpoint Based Rendering for Large-scale Point Models", "doi": null, "abstractUrl": "/proceedings-article/isise/2010/4360a117/12OmNrMHOpQ", "parentPublication": { "id": "proceedings/isise/2010/4360/0", "title": "2010 Third International Symposium on Information Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460252", "title": "Depth-map merging for Multi-View Stereo with high resolution images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460252/12OmNwNwzMv", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2011/4589/0/4589a013", "title": "High Quality Free Viewpoint Synthesis Using Multi-view Images with Depth Information", "doi": null, "abstractUrl": "/proceedings-article/ism/2011/4589a013/12OmNxWcH0w", "parentPublication": { "id": "proceedings/ism/2011/4589/0", "title": "2011 IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2008/03/ttp2008030548", "title": "Multiview Photometric Stereo", "doi": null, "abstractUrl": "/journal/tp/2008/03/ttp2008030548/13rRUB7a1gX", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/05/ttp2012050889", "title": "High Accuracy and Visibility-Consistent Dense Multiview Stereo", "doi": null, "abstractUrl": 
"/journal/tp/2012/05/ttp2012050889/13rRUwdIOVY", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/08/ttg2011081082", "title": "Fusing Multiview and Photometric Stereo for 3D Reconstruction under Uncalibrated Illumination", "doi": null, "abstractUrl": "/journal/tg/2011/08/ttg2011081082/13rRUy0qnGh", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2010030394", "articleId": "13rRUypp57y", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2010030419", "articleId": "13rRUEgarsF", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRWv", "name": "ttg2010030407s.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2010030407s.zip", "extension": "zip", "size": "28.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNqESuig", "title": "May/June", "year": "2010", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUEgarsF", "doi": "10.1109/TVCG.2009.64", "abstract": "In this paper, we present a novel method for texture mapping of closed surfaces. Our method is based on the technique of optimal mass transport (also known as the “earth-mover's metric”). This is a classical problem that concerns determining the optimal way, in the sense of minimal transportation cost, of moving a pile of soil from one site to another. In our context, the resulting mapping is area preserving and minimizes angle distortion in the optimal mass sense. Indeed, we first begin with an angle-preserving mapping (which may greatly distort area) and then correct it using the mass transport procedure derived via a certain gradient flow. In order to obtain fast convergence to the optimal mapping, we incorporate a multiresolution scheme into our flow. We also use ideas from discrete exterior calculus in our computations.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a novel method for texture mapping of closed surfaces. Our method is based on the technique of optimal mass transport (also known as the “earth-mover's metric”). This is a classical problem that concerns determining the optimal way, in the sense of minimal transportation cost, of moving a pile of soil from one site to another. In our context, the resulting mapping is area preserving and minimizes angle distortion in the optimal mass sense. Indeed, we first begin with an angle-preserving mapping (which may greatly distort area) and then correct it using the mass transport procedure derived via a certain gradient flow. 
In order to obtain fast convergence to the optimal mapping, we incorporate a multiresolution scheme into our flow. We also use ideas from discrete exterior calculus in our computations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a novel method for texture mapping of closed surfaces. Our method is based on the technique of optimal mass transport (also known as the “earth-mover's metric”). This is a classical problem that concerns determining the optimal way, in the sense of minimal transportation cost, of moving a pile of soil from one site to another. In our context, the resulting mapping is area preserving and minimizes angle distortion in the optimal mass sense. Indeed, we first begin with an angle-preserving mapping (which may greatly distort area) and then correct it using the mass transport procedure derived via a certain gradient flow. In order to obtain fast convergence to the optimal mapping, we incorporate a multiresolution scheme into our flow. 
We also use ideas from discrete exterior calculus in our computations.", "title": "Texture Mapping via Optimal Mass Transport", "normalizedTitle": "Texture Mapping via Optimal Mass Transport", "fno": "ttg2010030419", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Texture Mapping", "Optimal Mass Transport", "Parametrization", "Spherical Wavelets" ], "authors": [ { "givenName": "Ayelet", "surname": "Dominitz", "fullName": "Ayelet Dominitz", "affiliation": "Technion, Haifa", "__typename": "ArticleAuthorType" }, { "givenName": "Allen", "surname": "Tannenbaum", "fullName": "Allen Tannenbaum", "affiliation": "Georgia Institute of Technology, Atlanta and Technion, Haifa", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2010-05-01 00:00:00", "pubType": "trans", "pages": "419-433", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icmtma/2013/4932/0/4932b052", "title": "Study on Deformation of Surrounding Rock for Deep-Buried Tunnels in Rock Mass of Interbedded Sandstone and Slate", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2013/4932b052/12OmNC8Msuv", "parentPublication": { "id": "proceedings/icmtma/2013/4932/0", "title": "2013 Fifth International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cso/2009/3605/1/3605b016", "title": "Product Family Knowledge Modeling for Mass Customization", "doi": null, "abstractUrl": "/proceedings-article/cso/2009/3605b016/12OmNwD1q4k", "parentPublication": { "id": "cso/2009/3605/1", "title": "2009 International Joint Conference on Computational Sciences and Optimization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vlsm/2001/1278/0/12780029", "title": "Optimal Mass Transport and Image Registration", "doi": null, "abstractUrl": "/proceedings-article/vlsm/2001/12780029/12OmNwwd2VE", "parentPublication": { "id": "proceedings/vlsm/2001/1278/0", "title": "Proceedings of 1st IEEE Workshop on Variational and Level Set Methods in Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mcsul/2009/3976/0/3976a071", "title": "An Adaptive Mesh Strategy for Transient Flows Simulations", "doi": null, "abstractUrl": "/proceedings-article/mcsul/2009/3976a071/12OmNy3AgxY", "parentPublication": { "id": "proceedings/mcsul/2009/3976/0", "title": "Computational Modeling, Southern Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/nt/2017/01/07569011", "title": "PASE: Synthesizing Existing Transport Strategies for Near-Optimal Data Center Transport", "doi": null, "abstractUrl": "/journal/nt/2017/01/07569011/13rRUB7a18E", "parentPublication": { "id": "trans/nt", "title": "IEEE/ACM Transactions on Networking", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/05/ttg2009050777", "title": "Uncluttering Graph Layouts Using Anisotropic Diffusion and Mass Transport", "doi": null, "abstractUrl": "/journal/tg/2009/05/ttg2009050777/13rRUwInvyr", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2000/02/v0181", "title": "Conformal Surface Parameterization for Texture Mapping", "doi": null, "abstractUrl": "/journal/tg/2000/02/v0181/13rRUxBJhFj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2008/01/ttb2008010091", 
"title": "Combinatorial Approaches for Mass Spectra Recalibration", "doi": null, "abstractUrl": "/journal/tb/2008/01/ttb2008010091/13rRUxZ0nZP", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2015/11/07053911", "title": "Optimal Mass Transport for Shape Matching and Comparison", "doi": null, "abstractUrl": "/journal/tp/2015/11/07053911/13rRUxlgxUD", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122838", "title": "Area-Preservation Mapping using Optimal Mass Transport", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122838/13rRUyuegp6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2010030407", "articleId": "13rRUxYINf5", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2010030434", "articleId": "13rRUyp7tWT", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqESuig", "title": "May/June", "year": "2010", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyp7tWT", "doi": "10.1109/TVCG.2009.73", "abstract": "Ray-triangle intersection is an important algorithm, not only in the field of realistic rendering (based on ray tracing) but also in physics simulation, collision detection, modeling, etc. Obviously, the speed of this well-defined algorithm's implementations is important because calls to such a routine are numerous in rendering and simulation applications. Contemporary fast intersection algorithms, which use SIMD instructions, focus on the intersection of ray packets against triangles. For intersection between single rays and triangles, operations such as horizontal addition or dot product are required. The SSE4 instruction set adds the dot product instruction which can be used for this purpose. This paper presents a new modification of the fast ray-triangle intersection algorithms commonly used, which—when implemented on SSE4—outperforms the current state-of-the-art algorithms. It also allows both a single ray and ray packet intersection calculation with the same precomputed data. The speed gain measurements are described and discussed in the paper.", "abstracts": [ { "abstractType": "Regular", "content": "Ray-triangle intersection is an important algorithm, not only in the field of realistic rendering (based on ray tracing) but also in physics simulation, collision detection, modeling, etc. Obviously, the speed of this well-defined algorithm's implementations is important because calls to such a routine are numerous in rendering and simulation applications. Contemporary fast intersection algorithms, which use SIMD instructions, focus on the intersection of ray packets against triangles. 
For intersection between single rays and triangles, operations such as horizontal addition or dot product are required. The SSE4 instruction set adds the dot product instruction which can be used for this purpose. This paper presents a new modification of the fast ray-triangle intersection algorithms commonly used, which—when implemented on SSE4—outperforms the current state-of-the-art algorithms. It also allows both a single ray and ray packet intersection calculation with the same precomputed data. The speed gain measurements are described and discussed in the paper.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Ray-triangle intersection is an important algorithm, not only in the field of realistic rendering (based on ray tracing) but also in physics simulation, collision detection, modeling, etc. Obviously, the speed of this well-defined algorithm's implementations is important because calls to such a routine are numerous in rendering and simulation applications. Contemporary fast intersection algorithms, which use SIMD instructions, focus on the intersection of ray packets against triangles. For intersection between single rays and triangles, operations such as horizontal addition or dot product are required. The SSE4 instruction set adds the dot product instruction which can be used for this purpose. This paper presents a new modification of the fast ray-triangle intersection algorithms commonly used, which—when implemented on SSE4—outperforms the current state-of-the-art algorithms. It also allows both a single ray and ray packet intersection calculation with the same precomputed data. 
The speed gain measurements are described and discussed in the paper.", "title": "Yet Faster Ray-Triangle Intersection (Using SSE4)", "normalizedTitle": "Yet Faster Ray-Triangle Intersection (Using SSE4)", "fno": "ttg2010030434", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Ray Tracing", "Geometric Algorithms" ], "authors": [ { "givenName": "Jiří", "surname": "Havel", "fullName": "Jiří Havel", "affiliation": "Brno University of Technology, Brno", "__typename": "ArticleAuthorType" }, { "givenName": "Adam", "surname": "Herout", "fullName": "Adam Herout", "affiliation": "Brno University of Technology, Brno", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2010-05-01 00:00:00", "pubType": "trans", "pages": "434-438", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/rt/2007/1629/0/04342586", "title": "Ray-Strips: A Compact Mesh Representation for Interactive Ray Tracing", "doi": null, "abstractUrl": "/proceedings-article/rt/2007/04342586/12OmNASILFQ", "parentPublication": { "id": "proceedings/rt/2007/1629/0", "title": "IEEE/ EG Symposium on Interactive Ray Tracing 2007", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2008/2741/0/04634636", "title": "The edge volume heuristic - robust triangle subdivision for improved BVH performance", "doi": null, "abstractUrl": "/proceedings-article/rt/2008/04634636/12OmNBQkwYi", "parentPublication": { "id": "proceedings/rt/2008/2741/0", "title": "Symposium on Interactive Ray Tracing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2007/1629/0/04342597", "title": "Faster Ray Packets - Triangle Intersection through Vertex Culling", "doi": null, "abstractUrl": 
"/proceedings-article/rt/2007/04342597/12OmNBd9T0F", "parentPublication": { "id": "proceedings/rt/2007/1629/0", "title": "IEEE/ EG Symposium on Interactive Ray Tracing 2007", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2008/2741/0/04634642", "title": "Hardware architecture design and implementation of ray-triangle intersection with bounding volume hierarchies", "doi": null, "abstractUrl": "/proceedings-article/rt/2008/04634642/12OmNvjyxSY", "parentPublication": { "id": "proceedings/rt/2008/2741/0", "title": "Symposium on Interactive Ray Tracing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/clusterwksp/2010/8396/0/05613082", "title": "High performance triangle versus box intersection checks", "doi": null, "abstractUrl": "/proceedings-article/clusterwksp/2010/05613082/12OmNwDACvh", "parentPublication": { "id": "proceedings/clusterwksp/2010/8396/0", "title": "2010 IEEE International Conference On Cluster Computing Workshops and Posters (CLUSTER WORKSHOPS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2006/0693/0/04061543", "title": "Optimizing Ray-Triangle Intersection via Automated Search", "doi": null, "abstractUrl": "/proceedings-article/rt/2006/04061543/12OmNwFidbs", "parentPublication": { "id": "proceedings/rt/2006/0693/0", "title": "IEEE Symposium on Interactive Ray Tracing 2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2008/2741/0/04634618", "title": "Multi bounding volume hierarchies", "doi": null, "abstractUrl": "/proceedings-article/rt/2008/04634618/12OmNweBUCF", "parentPublication": { "id": "proceedings/rt/2008/2741/0", "title": "Symposium on Interactive Ray Tracing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1995/04/v0343", "title": "Octree-R: An Adaptive Octree for 
Efficient Ray Tracing", "doi": null, "abstractUrl": "/journal/tg/1995/04/v0343/13rRUxcbnH0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/12/08115176", "title": "Time Interval Ray Tracing for Motion Blur", "doi": null, "abstractUrl": "/journal/tg/2018/12/08115176/14H4WMfTBId", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2010030419", "articleId": "13rRUEgarsF", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2010030439", "articleId": "13rRUxYrbUz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqESuig", "title": "May/June", "year": "2010", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxYrbUz", "doi": "10.1109/TVCG.2009.84", "abstract": "We present a model for building, visualizing, and interacting with multiscale representations of information visualization techniques using hierarchical aggregation. The motivation for this work is to make visual representations more visually scalable and less cluttered. The model allows for augmenting existing techniques with multiscale functionality, as well as for designing new visualization and interaction techniques that conform to this new class of visual representations. We give some examples of how to use the model for standard information visualization techniques such as scatterplots, parallel coordinates, and node-link diagrams, and discuss existing techniques that are based on hierarchical aggregation. This yields a set of design guidelines for aggregated visualizations. We also present a basic vocabulary of interaction techniques suitable for navigating these multiscale visualizations.", "abstracts": [ { "abstractType": "Regular", "content": "We present a model for building, visualizing, and interacting with multiscale representations of information visualization techniques using hierarchical aggregation. The motivation for this work is to make visual representations more visually scalable and less cluttered. The model allows for augmenting existing techniques with multiscale functionality, as well as for designing new visualization and interaction techniques that conform to this new class of visual representations. 
We give some examples of how to use the model for standard information visualization techniques such as scatterplots, parallel coordinates, and node-link diagrams, and discuss existing techniques that are based on hierarchical aggregation. This yields a set of design guidelines for aggregated visualizations. We also present a basic vocabulary of interaction techniques suitable for navigating these multiscale visualizations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a model for building, visualizing, and interacting with multiscale representations of information visualization techniques using hierarchical aggregation. The motivation for this work is to make visual representations more visually scalable and less cluttered. The model allows for augmenting existing techniques with multiscale functionality, as well as for designing new visualization and interaction techniques that conform to this new class of visual representations. We give some examples of how to use the model for standard information visualization techniques such as scatterplots, parallel coordinates, and node-link diagrams, and discuss existing techniques that are based on hierarchical aggregation. This yields a set of design guidelines for aggregated visualizations. 
We also present a basic vocabulary of interaction techniques suitable for navigating these multiscale visualizations.", "title": "Hierarchical Aggregation for Information Visualization: Overview, Techniques, and Design Guidelines", "normalizedTitle": "Hierarchical Aggregation for Information Visualization: Overview, Techniques, and Design Guidelines", "fno": "ttg2010030439", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Aggregation", "Clustering", "Clutter Reduction", "Massive Data Sets", "Visual Exploration", "Visual Analytics" ], "authors": [ { "givenName": "Niklas", "surname": "Elmqvist", "fullName": "Niklas Elmqvist", "affiliation": "Purdue University, West Lafayette", "__typename": "ArticleAuthorType" }, { "givenName": "Jean-Daniel", "surname": "Fekete", "fullName": "Jean-Daniel Fekete", "affiliation": "INRIA Saclay, Université Paris-Sud, Paris", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2010-05-01 00:00:00", "pubType": "trans", "pages": "439-454", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cgiv/2009/3789/0/3789a454", "title": "Visualization Techniques on the Examination Timetabling Pre-processing Data", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2009/3789a454/12OmNC17hW4", "parentPublication": { "id": "proceedings/cgiv/2009/3789/0", "title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2014/6227/0/07042483", "title": "Weaving a carpet from log entries: A network security visualization built with co-creation", "doi": null, "abstractUrl": "/proceedings-article/vast/2014/07042483/12OmNwtn3An", "parentPublication": { "id": "proceedings/vast/2014/6227/0", "title": 
"2014 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2014/2874/0/2874a352", "title": "Visualization for Visual Analytics: Micro-visualization, Abstraction, and Physical Appeal", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2014/2874a352/12OmNySG3Oy", "parentPublication": { "id": "proceedings/pacificvis/2014/2874/0", "title": "2014 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/biovis/2011/0003/0/063069vehlow2", "title": "iHAT: Interactive hierarchical aggregation table", "doi": null, "abstractUrl": "/proceedings-article/biovis/2011/063069vehlow2/12OmNzZWbBO", "parentPublication": { "id": "proceedings/biovis/2011/0003/0", "title": "2011 IEEE Symposium on Biological Data Visualization (BioVis).", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010060943", "title": "How Information Visualization Novices Construct Visualizations", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010060943/13rRUwInvAZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011122392", "title": "Sequence Surveyor: Leveraging Overview for Scalable Genomic Alignment Visualization", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011122392/13rRUxOdD2C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2022/8812/0/881200a016", "title": "Streamlining Visualization Authoring in D3 Through User-Driven Templates", "doi": null, "abstractUrl": 
"/proceedings-article/vis/2022/881200a016/1J6heEO48bS", "parentPublication": { "id": "proceedings/vis/2022/8812/0", "title": "2022 IEEE Visualization and Visual Analytics (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2022/8812/0/881200a090", "title": "Who benefits from Visualization Adaptations? Towards a better Understanding of the Influence of Visualization Literacy", "doi": null, "abstractUrl": "/proceedings-article/vis/2022/881200a090/1J6hfplZRDO", "parentPublication": { "id": "proceedings/vis/2022/8812/0", "title": "2022 IEEE Visualization and Visual Analytics (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09492011", "title": "A Survey of Perception-Based Visualization Studies by Task", "doi": null, "abstractUrl": "/journal/tg/2022/12/09492011/1volPuHGMdW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09528956", "title": "Multiscale Visualization: A Structured Literature Analysis", "doi": null, "abstractUrl": "/journal/tg/2022/12/09528956/1wB2xUo1WKY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2010030434", "articleId": "13rRUyp7tWT", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2010030455", "articleId": "13rRUwInvf1", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqESuig", "title": "May/June", "year": "2010", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwInvf1", "doi": "10.1109/TVCG.2009.98", "abstract": "Focus+context interaction techniques based on the metaphor of lenses are used to navigate and interact with objects in large information spaces. They provide in-place magnification of a region of the display without requiring users to zoom into the representation and consequently lose context. In order to avoid occlusion of its immediate surroundings, the magnified region is often integrated in the context using smooth transitions based on spatial distortion. Such lenses have been developed for various types of representations using techniques often tightly coupled with the underlying graphics framework. We describe a representation-independent solution that can be implemented with minimal effort in different graphics frameworks, ranging from 3D graphics to rich multiscale 2D graphics combining text, bitmaps, and vector graphics. Our solution is not limited to spatial distortion and provides a unified model that makes it possible to define new focus+context interaction techniques based on lenses whose transition is defined by a combination of dynamic displacement and compositing functions. We present the results of a series of user evaluations that show that one such new lens, the speed-coupled blending lens, significantly outperforms all others.", "abstracts": [ { "abstractType": "Regular", "content": "Focus+context interaction techniques based on the metaphor of lenses are used to navigate and interact with objects in large information spaces. They provide in-place magnification of a region of the display without requiring users to zoom into the representation and consequently lose context. 
In order to avoid occlusion of its immediate surroundings, the magnified region is often integrated in the context using smooth transitions based on spatial distortion. Such lenses have been developed for various types of representations using techniques often tightly coupled with the underlying graphics framework. We describe a representation-independent solution that can be implemented with minimal effort in different graphics frameworks, ranging from 3D graphics to rich multiscale 2D graphics combining text, bitmaps, and vector graphics. Our solution is not limited to spatial distortion and provides a unified model that makes it possible to define new focus+context interaction techniques based on lenses whose transition is defined by a combination of dynamic displacement and compositing functions. We present the results of a series of user evaluations that show that one such new lens, the speed-coupled blending lens, significantly outperforms all others.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Focus+context interaction techniques based on the metaphor of lenses are used to navigate and interact with objects in large information spaces. They provide in-place magnification of a region of the display without requiring users to zoom into the representation and consequently lose context. In order to avoid occlusion of its immediate surroundings, the magnified region is often integrated in the context using smooth transitions based on spatial distortion. Such lenses have been developed for various types of representations using techniques often tightly coupled with the underlying graphics framework. We describe a representation-independent solution that can be implemented with minimal effort in different graphics frameworks, ranging from 3D graphics to rich multiscale 2D graphics combining text, bitmaps, and vector graphics. 
Our solution is not limited to spatial distortion and provides a unified model that makes it possible to define new focus+context interaction techniques based on lenses whose transition is defined by a combination of dynamic displacement and compositing functions. We present the results of a series of user evaluations that show that one such new lens, the speed-coupled blending lens, significantly outperforms all others.", "title": "Representation-Independent In-Place Magnification with Sigma Lenses", "normalizedTitle": "Representation-Independent In-Place Magnification with Sigma Lenses", "fno": "ttg2010030455", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Graphical User Interfaces", "Visualization Techniques And Methodologies", "Interaction Techniques", "Evaluation Methodology" ], "authors": [ { "givenName": "Emmanuel", "surname": "Pietriga", "fullName": "Emmanuel Pietriga", "affiliation": "INRIA Saclay and Université Paris-Sud, CNRS, Orsay", "__typename": "ArticleAuthorType" }, { "givenName": "Olivier", "surname": "Bau", "fullName": "Olivier Bau", "affiliation": "INRIA Saclay and Université Paris-Sud, CNRS, Orsay", "__typename": "ArticleAuthorType" }, { "givenName": "Caroline", "surname": "Appert", "fullName": "Caroline Appert", "affiliation": "INRIA Saclay and Université Paris-Sud, CNRS, Orsay", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2010-05-01 00:00:00", "pubType": "trans", "pages": "455-467", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icip/1994/6952/2/00413669", "title": "Depth estimation using stereo fish-eye lenses", "doi": null, "abstractUrl": "/proceedings-article/icip/1994/00413669/12OmNBfqG59", "parentPublication": { "id": "proceedings/icip/1994/6952/2", "title": "Proceedings of 1st International Conference 
on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2002/1435/4/14350112b", "title": "Exploratory Navigation in Large Multimedia Documents Using Context Lenses", "doi": null, "abstractUrl": "/proceedings-article/hicss/2002/14350112b/12OmNBuL1fB", "parentPublication": { "id": "proceedings/hicss/2002/1435/4", "title": "Proceedings of the 35th Annual Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2006/2602/0/26020017", "title": "Fisheye Tree Views and Lenses for Graph Visualization", "doi": null, "abstractUrl": "/proceedings-article/iv/2006/26020017/12OmNz4SOxE", "parentPublication": { "id": "proceedings/iv/2006/2602/0", "title": "Tenth International Conference on Information Visualisation (IV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2008/3268/0/3268a356", "title": "3D Generalization Lenses for Interactive Focus + Context Visualization of Virtual City Models", "doi": null, "abstractUrl": "/proceedings-article/iv/2008/3268a356/12OmNzaQoEB", "parentPublication": { "id": "proceedings/iv/2008/3268/0", "title": "2008 12th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/09/ttg2011091259", "title": "Single-Pass Composable 3D Lens Rendering and Spatiotemporal 3D Lenses", "doi": null, "abstractUrl": "/journal/tg/2011/09/ttg2011091259/13rRUwkfAZe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/08/08396300", "title": "Decal-Lenses: Interactive Lenses on Surfaces for Multivariate Visualization", "doi": null, "abstractUrl": "/journal/tg/2019/08/08396300/13rRUyeCkap", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/02/ttg2010020235", "title": "Route Visualization Using Detail Lenses", "doi": null, "abstractUrl": "/journal/tg/2010/02/ttg2010020235/13rRUygT7sx", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/03/ttg2010030394", "title": "Real-Time Rendering Method and Performance Evaluation of Composable 3D Lenses for Interactive VR", "doi": null, "abstractUrl": "/journal/tg/2010/03/ttg2010030394/13rRUypp57y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2018/6882/0/08823618", "title": "3De Interactive Lenses for Visualization in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/scivis/2018/08823618/1d5kwZvgfNm", "parentPublication": { "id": "proceedings/scivis/2018/6882/0", "title": "2018 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2010030439", "articleId": "13rRUxYrbUz", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2010030468", "articleId": "13rRUxlgxOg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgEk", "name": "ttg2010030455s.mov", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2010030455s.mov", "extension": "mov", "size": "37.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNqESuig", "title": "May/June", "year": "2010", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxlgxOg", "doi": "10.1109/TVCG.2009.86", "abstract": "Navigating in large geometric spaces—such as maps, social networks, or long documents—typically requires a sequence of pan and zoom actions. However, this strategy is often ineffective and cumbersome, especially when trying to study and compare several distant objects. We propose a new distortion technique that folds the intervening space to guarantee visibility of multiple focus regions. The folds themselves show contextual information and support unfolding and paging interactions. We conducted a study comparing the space-folding technique to existing approaches and found that participants performed significantly better with the new technique. We also describe how to implement this distortion technique and give an in-depth case study on how to apply it to the visualization of large-scale 1D time-series data.", "abstracts": [ { "abstractType": "Regular", "content": "Navigating in large geometric spaces—such as maps, social networks, or long documents—typically requires a sequence of pan and zoom actions. However, this strategy is often ineffective and cumbersome, especially when trying to study and compare several distant objects. We propose a new distortion technique that folds the intervening space to guarantee visibility of multiple focus regions. The folds themselves show contextual information and support unfolding and paging interactions. We conducted a study comparing the space-folding technique to existing approaches and found that participants performed significantly better with the new technique. 
We also describe how to implement this distortion technique and give an in-depth case study on how to apply it to the visualization of large-scale 1D time-series data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Navigating in large geometric spaces—such as maps, social networks, or long documents—typically requires a sequence of pan and zoom actions. However, this strategy is often ineffective and cumbersome, especially when trying to study and compare several distant objects. We propose a new distortion technique that folds the intervening space to guarantee visibility of multiple focus regions. The folds themselves show contextual information and support unfolding and paging interactions. We conducted a study comparing the space-folding technique to existing approaches and found that participants performed significantly better with the new technique. We also describe how to implement this distortion technique and give an in-depth case study on how to apply it to the visualization of large-scale 1D time-series data.", "title": "Mélange: Space Folding for Visual Exploration", "normalizedTitle": "Mélange: Space Folding for Visual Exploration", "fno": "ttg2010030468", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Interaction", "Visualization", "Navigation", "Exploration", "Folding", "Split Screen", "Space Distortion", "Focus Context" ], "authors": [ { "givenName": "Niklas", "surname": "Elmqvist", "fullName": "Niklas Elmqvist", "affiliation": "Purdue University, West Lafayette", "__typename": "ArticleAuthorType" }, { "givenName": "Yann", "surname": "Riche", "fullName": "Yann Riche", "affiliation": "Independant Researcher, Seattle", "__typename": "ArticleAuthorType" }, { "givenName": "Nathalie", "surname": "Henry-Riche", "fullName": "Nathalie Henry-Riche", "affiliation": "Microsoft Research, Redmond", "__typename": "ArticleAuthorType" }, { "givenName": "Jean-Daniel", "surname": "Fekete", "fullName": "Jean-Daniel Fekete", "affiliation": "INRIA 
Saclay, INRIA/LRI, Paris", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2010-05-01 00:00:00", "pubType": "trans", "pages": "468-483", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cgiv/2005/2392/0/23920162", "title": "DualView: A Focus+Context Technique for Navigating Large Graphs", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2005/23920162/12OmNBBQZqd", "parentPublication": { "id": "proceedings/cgiv/2005/2392/0", "title": "International Conference on Computer Graphics, Imaging and Visualization (CGIV'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icst/2018/5012/0/501201a139", "title": "Exhaustive Exploration of the Failure-Oblivious Computing Search Space", "doi": null, "abstractUrl": "/proceedings-article/icst/2018/501201a139/12OmNrNh0HO", "parentPublication": { "id": "proceedings/icst/2018/5012/0", "title": "2018 IEEE 11th International Conference on Software Testing, Verification and Validation (ICST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispdc/2011/4540/0/4540a033", "title": "Using Virtualization and Job Folding for Batch Scheduling", "doi": null, "abstractUrl": "/proceedings-article/ispdc/2011/4540a033/12OmNvCRgkY", "parentPublication": { "id": "proceedings/ispdc/2011/4540/0", "title": "Parallel and Distributed Computing, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iacsit-sc/2009/3653/0/3653a581", "title": "Computer Algorithms for Discriminating Protein Folds and Predicting Protein Folding Rates Based on Contact Information", "doi": null, "abstractUrl": "/proceedings-article/iacsit-sc/2009/3653a581/12OmNwpoFJ9", 
"parentPublication": { "id": "proceedings/iacsit-sc/2009/3653/0", "title": "Computer Science and Information Technology, International Association of", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2010/8420/0/05720344", "title": "Visual Data Exploration to Feature Space Definition", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2010/05720344/12OmNxGSm67", "parentPublication": { "id": "proceedings/sibgrapi/2010/8420/0", "title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ecbs/2011/4379/0/4379a071", "title": "A Meta-Framework for Design Space Exploration", "doi": null, "abstractUrl": "/proceedings-article/ecbs/2011/4379a071/12OmNzUPpnT", "parentPublication": { "id": "proceedings/ecbs/2011/4379/0", "title": "Engineering of Computer-Based Systems, IEEE International Conference on the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2005/2790/0/27900011", "title": "An Evaluation of Content Browsing Techniques for Hierarchical Space-Filling Visualizations", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2005/27900011/12OmNzVXNNF", "parentPublication": { "id": "proceedings/ieee-infovis/2005/2790/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icst/2012/4670/0/4670a320", "title": "Evaluating Machine-Independent Metrics for State-Space Exploration", "doi": null, "abstractUrl": "/proceedings-article/icst/2012/4670a320/12OmNzayNp2", "parentPublication": { "id": "proceedings/icst/2012/4670/0", "title": "2012 IEEE Fifth International Conference on Software Testing, Verification and Validation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tc/1973/11/01672230", "title": "Fault Folding for Irredundant and Redundant Combinational Circuits", "doi": null, "abstractUrl": "/journal/tc/1973/11/01672230/13rRUyfKIGj", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07536133", "title": "WeightLifter: Visual Weight Space Exploration for Multi-Criteria Decision Making", "doi": null, "abstractUrl": "/journal/tg/2017/01/07536133/13rRUyuNsx1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2010030455", "articleId": "13rRUwInvf1", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2010030484", "articleId": "13rRUwI5TQU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }