data
dict
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1jrU0RsEpnG", "doi": "10.1109/TVCG.2020.2973745", "abstract": "Presents the introductory editorial for this issue of the publication.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory editorial for this issue of the publication.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory editorial for this issue of the publication.", "title": "Editor's Note", "normalizedTitle": "Editor's Note", "fno": "09082801", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Klaus", "surname": "Mueller", "fullName": "Klaus Mueller", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2135-2141", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "mags/an/2022/03/09875139", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2022/03/09875139/1GlbXTIEwaQ", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2022/04/09972860", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2022/04/09972860/1ISVNzFCZu8", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2020/02/08956009", "title": 
"Editor's Note", "doi": null, "abstractUrl": "/journal/td/2020/02/08956009/1gtJY06WATe", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2020/01/09031986", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2020/01/09031986/1i6VhktGnkc", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2020/02/09103673", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2020/02/09103673/1keqEV28ioE", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2021/04/09257115", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/td/2021/04/09257115/1oFCKncAhqM", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2020/04/09263260", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2020/04/09263260/1oReM0ot75m", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2021/10/09408530", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/td/2021/10/09408530/1sVEVpV9zNK", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2021/03/09546090", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2021/03/09546090/1x6zEFuXbH2", 
"parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/12/09586410", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/tg/2021/12/09586410/1y11sTji3vO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "09082802", "articleId": "1jrTVLo1tpC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1jrTVLo1tpC", "doi": "10.1109/TVCG.2020.2974638", "abstract": "The five papers in this special section were from the 2020 IEEE Pacific Visualization Symposium (IEEE PacificVis), which was scheduled to be hosted by Tianjin University and held in Tianjin, China, from April 14 to 17, 2020.", "abstracts": [ { "abstractType": "Regular", "content": "The five papers in this special section were from the 2020 IEEE Pacific Visualization Symposium (IEEE PacificVis), which was scheduled to be hosted by Tianjin University and held in Tianjin, China, from April 14 to 17, 2020.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The five papers in this special section were from the 2020 IEEE Pacific Visualization Symposium (IEEE PacificVis), which was scheduled to be hosted by Tianjin University and held in Tianjin, China, from April 14 to 17, 2020.", "title": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2020", "normalizedTitle": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2020", "fno": "09082802", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Special Issues And Sections", "Meetings", "Visualization", "Computer Graphics" ], "authors": [ { "givenName": "Fabian", "surname": "Beck", "fullName": "Fabian Beck", "affiliation": "Paluno - The Ruhr Institute for Software Technology, University of Duisburg-Essen, Duisburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Jinwook", "surname": "Seo", "fullName": "Jinwook Seo", "affiliation": "Department of Computer Science and Engineering, Seoul National University, Seoul, Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Chaoli", "surname": "Wang", "fullName": 
"Chaoli Wang", "affiliation": "Department of Computer Science and Engineering, University of Notre Dame, Notre Dame, IN, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2142-2143", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2013/06/ttg2013060898", "title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium 2012", "doi": null, "abstractUrl": "/journal/tg/2013/06/ttg2013060898/13rRUNvgziD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/08/07138667", "title": "Guest Editors’ Introduction: Special Section on the IEEE Pacific Visualization Symposium 2014", "doi": null, "abstractUrl": "/journal/tg/2015/08/07138667/13rRUwI5Ugf", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/08/06847259", "title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium", "doi": null, "abstractUrl": "/journal/tg/2014/08/06847259/13rRUxD9gXJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/06/08352605", "title": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2018", "doi": null, "abstractUrl": "/journal/tg/2018/06/08352605/13rRUxlgxOp", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer 
Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/06/08703194", "title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2019", "doi": null, "abstractUrl": "/journal/tg/2019/06/08703194/19Er7j5Ad7a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2022/01/09702708", "title": "Guest Editors’ Introduction to the Special Section on Bioinformatics Research and Applications", "doi": null, "abstractUrl": "/journal/tb/2022/01/09702708/1AH375DQaGY", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/06/09766260", "title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2022", "doi": null, "abstractUrl": "/journal/tg/2022/06/09766260/1D34QjpFGyQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2022/03/09788108", "title": "Editorial", "doi": null, "abstractUrl": "/journal/tb/2022/03/09788108/1DU9k5pRa4o", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/07/09108341", "title": "Guest Editors' Introduction to the Special Section on Computational Photography", "doi": null, "abstractUrl": "/journal/tp/2020/07/09108341/1koL3gQqTHa", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2021/06/09430173", "title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2021", "doi": null, "abstractUrl": "/journal/tg/2021/06/09430173/1tzuiF6azcs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09082801", "articleId": "1jrU0RsEpnG", "__typename": "AdjacentArticleType" }, "next": { "fno": "08978585", "articleId": "1haUx0fpghW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1haUx0fpghW", "doi": "10.1109/TVCG.2020.2970509", "abstract": "When point clouds are labeled in information visualization applications, sophisticated guidelines as in cartography do not yet exist. Existing naive strategies may mislead as to which points belong to which label. To inform improved strategies, we studied factors influencing this phenomenon. We derived a class of labeled point cloud representations from existing applications and we defined different models predicting how humans interpret such complex representations, focusing on their geometric properties. We conducted an empirical study, in which participants had to relate dots to labels in order to evaluate how well our models predict. Our results indicate that presence of point clusters, label size, and angle to the label have an effect on participants' judgment as well as that the distance measure types considered perform differently discouraging the use of label centers as reference points.", "abstracts": [ { "abstractType": "Regular", "content": "When point clouds are labeled in information visualization applications, sophisticated guidelines as in cartography do not yet exist. Existing naive strategies may mislead as to which points belong to which label. To inform improved strategies, we studied factors influencing this phenomenon. We derived a class of labeled point cloud representations from existing applications and we defined different models predicting how humans interpret such complex representations, focusing on their geometric properties. We conducted an empirical study, in which participants had to relate dots to labels in order to evaluate how well our models predict. 
Our results indicate that presence of point clusters, label size, and angle to the label have an effect on participants' judgment as well as that the distance measure types considered perform differently discouraging the use of label centers as reference points.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "When point clouds are labeled in information visualization applications, sophisticated guidelines as in cartography do not yet exist. Existing naive strategies may mislead as to which points belong to which label. To inform improved strategies, we studied factors influencing this phenomenon. We derived a class of labeled point cloud representations from existing applications and we defined different models predicting how humans interpret such complex representations, focusing on their geometric properties. We conducted an empirical study, in which participants had to relate dots to labels in order to evaluate how well our models predict. Our results indicate that presence of point clusters, label size, and angle to the label have an effect on participants' judgment as well as that the distance measure types considered perform differently discouraging the use of label centers as reference points.", "title": "Modeling How Humans Judge Dot-Label Relations in Point Cloud Visualizations", "normalizedTitle": "Modeling How Humans Judge Dot-Label Relations in Point Cloud Visualizations", "fno": "08978585", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cartography", "Data Visualisation", "Distance Measurement", "Point Cloud Visualizations", "Information Visualization Applications", "Labeled Point Cloud Representations", "Complex Representations", "Point Clusters", "Label Size", "Label Centers", "Dot Label Relations", "Visualization", "Three Dimensional Displays", "Labeling", "Task Analysis", "Predictive Models", "Urban Areas", "Lenses", "Human Judgment Model", "Document Visualization", "Label Placement" ], "authors": [ { "givenName": "Martin", 
"surname": "Reckziegel", "fullName": "Martin Reckziegel", "affiliation": "Leipzig University", "__typename": "ArticleAuthorType" }, { "givenName": "Linda", "surname": "Pfeiffer", "fullName": "Linda Pfeiffer", "affiliation": "German Aerospace Center DLR", "__typename": "ArticleAuthorType" }, { "givenName": "Christian", "surname": "Heine", "fullName": "Christian Heine", "affiliation": "Leipzig University", "__typename": "ArticleAuthorType" }, { "givenName": "Stefan", "surname": "Jänicke", "fullName": "Stefan Jänicke", "affiliation": "University of Southern Denmark", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2144-2155", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdmw/2017/3800/0/3800a850", "title": "Combining Active Learning and Semi-Supervised Learning by Using Selective Label Spreading", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2017/3800a850/12OmNvFHfGd", "parentPublication": { "id": "proceedings/icdmw/2017/3800/0", "title": "2017 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/06/ttg2008061237", "title": "Particle-based labeling: Fast point-feature labeling without obscuring other visual features", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061237/13rRUwbaqUM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539393", "title": "An Evaluation of Visual Search Support in Maps", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539393/13rRUwjGoLK", "parentPublication": 
{ "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500b743", "title": "IDD: A Dataset for Exploring Problems of Autonomous Navigation in Unconstrained Environments", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500b743/18j8NGRjKve", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2021/0337/0/033700a943", "title": "Unsupervised Labeling and Extraction of Phrase-based Concepts in Vulnerability Descriptions", "doi": null, "abstractUrl": "/proceedings-article/ase/2021/033700a943/1AjTfGOSCwU", "parentPublication": { "id": "proceedings/ase/2021/0337/0", "title": "2021 36th IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09904455", "title": "Multiple Forecast Visualizations (MFVs): Trade-offs in Trust and Performance in Multiple COVID-19 Forecast Visualizations", "doi": null, "abstractUrl": "/journal/tg/2023/01/09904455/1H1gjlaBqVO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09917516", "title": "Geo-Storylines: Integrating Maps into Storyline Visualizations", "doi": null, "abstractUrl": "/journal/tg/2023/01/09917516/1HrexIf2zZe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ai/5555/01/10113715", "title": "Noisy Label Detection and Counterfactual 
Correction", "doi": null, "abstractUrl": "/journal/ai/5555/01/10113715/1MNbV9nYrXq", "parentPublication": { "id": "trans/ai", "title": "IEEE Transactions on Artificial Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08809750", "title": "Pattern-Driven Navigation in 2D Multiscale Visualizations with Scalable Insets", "doi": null, "abstractUrl": "/journal/tg/2020/01/08809750/1cHEu5CRoFq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09573413", "title": "Adaptive Graph Guided Disambiguation for Partial Label Learning", "doi": null, "abstractUrl": "/journal/tp/2022/12/09573413/1xH5E3Yjgek", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09082802", "articleId": "1jrTVLo1tpC", "__typename": "AdjacentArticleType" }, "next": { "fno": "09035636", "articleId": "1iaeBQ4H756", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iaeBQ4H756", "doi": "10.1109/TVCG.2020.2970522", "abstract": "We propose a photographic method to show scalar values of high dynamic range (HDR) by color mapping for 2D visualization. We combine (1) tone-mapping operators that transform the data to the display range of the monitor while preserving perceptually important features, based on a systematic evaluation, and (2) simulated glares that highlight high-value regions. Simulated glares are effective for highlighting small areas (of a few pixels) that may not be visible with conventional visualizations; through a controlled perception study, we confirm that glare is preattentive. The usefulness of our overall photographic HDR visualization is validated through the feedback of expert users.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a photographic method to show scalar values of high dynamic range (HDR) by color mapping for 2D visualization. We combine (1) tone-mapping operators that transform the data to the display range of the monitor while preserving perceptually important features, based on a systematic evaluation, and (2) simulated glares that highlight high-value regions. Simulated glares are effective for highlighting small areas (of a few pixels) that may not be visible with conventional visualizations; through a controlled perception study, we confirm that glare is preattentive. The usefulness of our overall photographic HDR visualization is validated through the feedback of expert users.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a photographic method to show scalar values of high dynamic range (HDR) by color mapping for 2D visualization. 
We combine (1) tone-mapping operators that transform the data to the display range of the monitor while preserving perceptually important features, based on a systematic evaluation, and (2) simulated glares that highlight high-value regions. Simulated glares are effective for highlighting small areas (of a few pixels) that may not be visible with conventional visualizations; through a controlled perception study, we confirm that glare is preattentive. The usefulness of our overall photographic HDR visualization is validated through the feedback of expert users.", "title": "Photographic High-Dynamic-Range Scalar Visualization", "normalizedTitle": "Photographic High-Dynamic-Range Scalar Visualization", "fno": "09035636", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Image Colour Analysis", "Color Mapping", "Simulated Glares", "Photographic HDR Visualization", "Photographic High Dynamic Range Scalar Visualization", "2 D Visualization", "Tone Mapping Operators", "Data Visualization", "Image Color Analysis", "Pipelines", "Dynamic Range", "Visualization", "Two Dimensional Displays", "Monitoring", "Tone Mapping", "Glare", "High Dynamic Range Visualization", "2 D Diagrams" ], "authors": [ { "givenName": "Liang", "surname": "Zhou", "fullName": "Liang Zhou", "affiliation": "SCI Institute, University of Utah, Salt Lake City, UT, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Marc", "surname": "Rivinius", "fullName": "Marc Rivinius", "affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Chris R.", "surname": "Johnson", "fullName": "Chris R. 
Johnson", "affiliation": "SCI Institute, University of Utah, Salt Lake City, UT, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Weiskopf", "fullName": "Daniel Weiskopf", "affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2156-2167", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/aipr/2014/5921/0/07041912", "title": "High dynamic range (HDR) video processing for the exploitation of high bit-depth sensors in human-monitored surveillance", "doi": null, "abstractUrl": "/proceedings-article/aipr/2014/07041912/12OmNA14Aip", "parentPublication": { "id": "proceedings/aipr/2014/5921/0", "title": "2014 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csie/2009/3507/6/3507f583", "title": "Color Vision Based High Dynamic Range Images Rendering", "doi": null, "abstractUrl": "/proceedings-article/csie/2009/3507f583/12OmNCctfaE", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2009/4534/0/05559003", "title": "Artifact-free High Dynamic Range imaging", "doi": null, "abstractUrl": "/proceedings-article/iccp/2009/05559003/12OmNCuDzub", "parentPublication": { "id": "proceedings/iccp/2009/4534/0", "title": "IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2016/1853/0/07786173", "title": "High Dynamic Range Video Coding with Backward 
Compatibility", "doi": null, "abstractUrl": "/proceedings-article/dcc/2016/07786173/12OmNxcMSkC", "parentPublication": { "id": "proceedings/dcc/2016/1853/0", "title": "2016 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2016/0806/0/07550796", "title": "High dynamic range image composition using a linear interpolation approach", "doi": null, "abstractUrl": "/proceedings-article/icis/2016/07550796/12OmNxw5Bpw", "parentPublication": { "id": "proceedings/icis/2016/0806/0", "title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2004/2177/0/21770269", "title": "Two-Channel Technique for High Dynamic Range Image Visualization", "doi": null, "abstractUrl": "/proceedings-article/iv/2004/21770269/12OmNyo1nMX", "parentPublication": { "id": "proceedings/iv/2004/2177/0", "title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. 
IV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900a546", "title": "Multi-Bracket High Dynamic Range Imaging with Event Cameras", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900a546/1G56FK3UGPe", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900b031", "title": "Gamma-enhanced Spatial Attention Network for Efficient High Dynamic Range Imaging", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900b031/1G56nGzWShG", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h706", "title": "Neural Auto-Exposure for High-Dynamic Range Object Detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h706/1yeJuGu5Xvq", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900g293", "title": "End-to-end High Dynamic Range Camera Pipeline Optimization", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900g293/1yeK6nSzK1y", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08978585", "articleId": "1haUx0fpghW", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "08977320", "articleId": "1h2AIkwYg4E", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1h2AIkwYg4E", "doi": "10.1109/TVCG.2020.2970512", "abstract": "Interaction plays a vital role during visual network exploration as users need to engage with both elements in the view (e.g., nodes, links) and interface controls (e.g., sliders, dropdown menus). Particularly as the size and complexity of a network grow, interactive displays supporting multimodal input (e.g., touch, speech, pen, gaze) exhibit the potential to facilitate fluid interaction during visual network exploration and analysis. While multimodal interaction with network visualization seems like a promising idea, many open questions remain. For instance, do users actually prefer multimodal input over unimodal input, and if so, why? Does it enable them to interact more naturally, or does having multiple modes of input confuse users? To answer such questions, we conducted a qualitative user study in the context of a network visualization tool, comparing speech- and touch-based unimodal interfaces to a multimodal interface combining the two. Our results confirm that participants strongly prefer multimodal input over unimodal input attributing their preference to: 1) the freedom of expression, 2) the complementary nature of speech and touch, and 3) integrated interactions afforded by the combination of the two modalities. 
We also describe the interaction patterns participants employed to perform common network visualization operations and highlight themes for future multimodal network visualization systems to consider.", "abstracts": [ { "abstractType": "Regular", "content": "Interaction plays a vital role during visual network exploration as users need to engage with both elements in the view (e.g., nodes, links) and interface controls (e.g., sliders, dropdown menus). Particularly as the size and complexity of a network grow, interactive displays supporting multimodal input (e.g., touch, speech, pen, gaze) exhibit the potential to facilitate fluid interaction during visual network exploration and analysis. While multimodal interaction with network visualization seems like a promising idea, many open questions remain. For instance, do users actually prefer multimodal input over unimodal input, and if so, why? Does it enable them to interact more naturally, or does having multiple modes of input confuse users? To answer such questions, we conducted a qualitative user study in the context of a network visualization tool, comparing speech- and touch-based unimodal interfaces to a multimodal interface combining the two. Our results confirm that participants strongly prefer multimodal input over unimodal input attributing their preference to: 1) the freedom of expression, 2) the complementary nature of speech and touch, and 3) integrated interactions afforded by the combination of the two modalities. We also describe the interaction patterns participants employed to perform common network visualization operations and highlight themes for future multimodal network visualization systems to consider.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Interaction plays a vital role during visual network exploration as users need to engage with both elements in the view (e.g., nodes, links) and interface controls (e.g., sliders, dropdown menus). 
Particularly as the size and complexity of a network grow, interactive displays supporting multimodal input (e.g., touch, speech, pen, gaze) exhibit the potential to facilitate fluid interaction during visual network exploration and analysis. While multimodal interaction with network visualization seems like a promising idea, many open questions remain. For instance, do users actually prefer multimodal input over unimodal input, and if so, why? Does it enable them to interact more naturally, or does having multiple modes of input confuse users? To answer such questions, we conducted a qualitative user study in the context of a network visualization tool, comparing speech- and touch-based unimodal interfaces to a multimodal interface combining the two. Our results confirm that participants strongly prefer multimodal input over unimodal input attributing their preference to: 1) the freedom of expression, 2) the complementary nature of speech and touch, and 3) integrated interactions afforded by the combination of the two modalities. We also describe the interaction patterns participants employed to perform common network visualization operations and highlight themes for future multimodal network visualization systems to consider.", "title": "Touch? Speech? or Touch and Speech? Investigating Multimodal Interaction for Visual Network Exploration and Analysis", "normalizedTitle": "Touch? Speech? or Touch and Speech? 
Investigating Multimodal Interaction for Visual Network Exploration and Analysis", "fno": "08977320", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Graphical User Interfaces", "Mobile Handsets", "Visual Network Exploration", "Interactive Displays", "Multimodal Input", "Network Visualization Tool", "Multimodal Interface", "Interaction Patterns Participants", "Multimodal Network Visualization Systems", "Network Visualization Operations", "Visualization", "Encoding", "Tools", "Data Visualization", "Speech Recognition", "Natural Languages", "Task Analysis", "Multimodal Interaction", "Network Visualizations", "Natural Language Interfaces" ], "authors": [ { "givenName": "Ayshwarya", "surname": "Saktheeswaran", "fullName": "Ayshwarya Saktheeswaran", "affiliation": "Georgia Institute of Technology, Atlanta, GA", "__typename": "ArticleAuthorType" }, { "givenName": "Arjun", "surname": "Srinivasan", "fullName": "Arjun Srinivasan", "affiliation": "Georgia Institute of Technology, Atlanta, GA", "__typename": "ArticleAuthorType" }, { "givenName": "John", "surname": "Stasko", "fullName": "John Stasko", "affiliation": "Georgia Institute of Technology, Atlanta, GA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2168-2179", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cscs/2017/1839/0/07968566", "title": "Multimodal Interface for Ambient Assisted Living", "doi": null, "abstractUrl": "/proceedings-article/cscs/2017/07968566/12OmNARRYpY", "parentPublication": { "id": "proceedings/cscs/2017/1839/0", "title": "2017 21st International Conference on Control Systems and Computer Science (CSCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/haptics/2003/1890/0/18900151", "title": "Relative Performance Using Haptic and/or Touch-Produced Auditory Cues in a Remote Absolute Texture Identification Task", "doi": null, "abstractUrl": "/proceedings-article/haptics/2003/18900151/12OmNzDehah", "parentPublication": { "id": "proceedings/haptics/2003/1890/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08019860", "title": "Orko: Facilitating Multimodal Interaction for Visual Exploration and Analysis of Networks", "doi": null, "abstractUrl": "/journal/tg/2018/01/08019860/13rRUx0gefo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900e578", "title": "Improving Multimodal Speech Recognition by Data Augmentation and Speech Representations", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900e578/1G561ezEc9O", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09894081", "title": "Putting Vision and Touch Into Conflict: Results from a Multimodal Mixed Reality Setup", "doi": null, "abstractUrl": "/journal/tg/5555/01/09894081/1GIqtQDhf8I", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmbs/2022/6770/0/677000a199", "title": "Leveraging Clinical BERT in Multimodal Mortality Prediction Models for COVID-19", "doi": null, "abstractUrl": 
"/proceedings-article/cmbs/2022/677000a199/1GhW8bBO4iQ", "parentPublication": { "id": "proceedings/cmbs/2022/6770/0", "title": "2022 IEEE 35th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/08/09023002", "title": "Interweaving Multimodal Interaction With Flexible Unit Visualizations for Data Exploration", "doi": null, "abstractUrl": "/journal/tg/2021/08/09023002/1hTHRTEQgRG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093414", "title": "Exploring Hate Speech Detection in Multimodal Publications", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093414/1jPbxi0Vk40", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800n3286", "title": "MMTM: Multimodal Transfer Module for CNN Fusion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800n3286/1m3ojQrj4iY", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a714", "title": "MIVA: Multimodal Interactions for Facilitating Visual Analysis with Multiple Coordinated Views", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a714/1rSR8lx5snS", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], 
"adjacentArticles": { "previous": { "fno": "09035636", "articleId": "1iaeBQ4H756", "__typename": "AdjacentArticleType" }, "next": { "fno": "08977505", "articleId": "1h2AIHeB46A", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1h2AIHeB46A", "doi": "10.1109/TVCG.2020.2970523", "abstract": "Graph drawing readability metrics are routinely used to assess and create node-link layouts of network data. Existing readability metrics fall short in three ways. The many count-based metrics such as edge-edge or node-edge crossings simply provide integer counts, missing the opportunity to quantify the amount of overlap between items, which may vary in size, at a more fine-grained level. Current metrics focus solely on single-level topological structure, ignoring the possibility of multi-level structure such as large and thus highly salient metanodes. Most current metrics focus on the measurement of clutter in the form of crossings and overlaps, and do not take into account the trade-off between the clutter and the information sparsity of the drawing, which we refer to as sprawl. We propose an area-aware approach to clutter metrics that tracks the extent of geometric overlaps between node-node, node-edge, and edge-edge pairs in detail. It handles variable-size nodes and explicitly treats metanodes and leaf nodes uniformly. We call the combination of a sprawl metric and an area-aware clutter metric a sprawlter metric. We present an instantiation of the sprawlter metrics featuring a formal and thorough discussion of the crucial component, the penalty mapping function. 
We implement and validate our proposed metrics with extensive computational analysis of graph layouts, considering four layout algorithms and 56 layouts encompassing both real-world data and synthetic examples illustrating specific configurations of interest.", "abstracts": [ { "abstractType": "Regular", "content": "Graph drawing readability metrics are routinely used to assess and create node-link layouts of network data. Existing readability metrics fall short in three ways. The many count-based metrics such as edge-edge or node-edge crossings simply provide integer counts, missing the opportunity to quantify the amount of overlap between items, which may vary in size, at a more fine-grained level. Current metrics focus solely on single-level topological structure, ignoring the possibility of multi-level structure such as large and thus highly salient metanodes. Most current metrics focus on the measurement of clutter in the form of crossings and overlaps, and do not take into account the trade-off between the clutter and the information sparsity of the drawing, which we refer to as sprawl. We propose an area-aware approach to clutter metrics that tracks the extent of geometric overlaps between node-node, node-edge, and edge-edge pairs in detail. It handles variable-size nodes and explicitly treats metanodes and leaf nodes uniformly. We call the combination of a sprawl metric and an area-aware clutter metric a sprawlter metric. We present an instantiation of the sprawlter metrics featuring a formal and thorough discussion of the crucial component, the penalty mapping function. 
We implement and validate our proposed metrics with extensive computational analysis of graph layouts, considering four layout algorithms and 56 layouts encompassing both real-world data and synthetic examples illustrating specific configurations of interest.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Graph drawing readability metrics are routinely used to assess and create node-link layouts of network data. Existing readability metrics fall short in three ways. The many count-based metrics such as edge-edge or node-edge crossings simply provide integer counts, missing the opportunity to quantify the amount of overlap between items, which may vary in size, at a more fine-grained level. Current metrics focus solely on single-level topological structure, ignoring the possibility of multi-level structure such as large and thus highly salient metanodes. Most current metrics focus on the measurement of clutter in the form of crossings and overlaps, and do not take into account the trade-off between the clutter and the information sparsity of the drawing, which we refer to as sprawl. We propose an area-aware approach to clutter metrics that tracks the extent of geometric overlaps between node-node, node-edge, and edge-edge pairs in detail. It handles variable-size nodes and explicitly treats metanodes and leaf nodes uniformly. We call the combination of a sprawl metric and an area-aware clutter metric a sprawlter metric. We present an instantiation of the sprawlter metrics featuring a formal and thorough discussion of the crucial component, the penalty mapping function. 
We implement and validate our proposed metrics with extensive computational analysis of graph layouts, considering four layout algorithms and 56 layouts encompassing both real-world data and synthetic examples illustrating specific configurations of interest.", "title": "The Sprawlter Graph Readability Metric: Combining Sprawl and Area-Aware Clutter", "normalizedTitle": "The Sprawlter Graph Readability Metric: Combining Sprawl and Area-Aware Clutter", "fno": "08977505", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Graph Theory", "Telecommunication Network Topology", "Node Link Layouts", "Count Based Metrics", "Node Edge Crossings", "Integer Counts", "Fine Grained Level", "Current Metrics Focus", "Single Level Topological Structure", "Multilevel Structure", "Clutter Metrics", "Geometric Overlaps", "Edge Edge Pairs", "Variable Size Nodes", "Leaf Nodes", "Sprawl Metric", "Sprawlter Metrics", "Graph Layouts", "Sprawlter Graph Readability Metric", "Graph Drawing Readability Metrics", "Area Aware Clutter Metric", "Salient Metanodes", "Measurement", "Layout", "Clutter", "Readability Metrics", "Compounds", "Visualization", "Periodic Structures", "Graph Drawing", "Graph Drawing Metrics", "Readability Metrics", "Aesthetic Criteria" ], "authors": [ { "givenName": "Zipeng", "surname": "Liu", "fullName": "Zipeng Liu", "affiliation": "University of British Columbia", "__typename": "ArticleAuthorType" }, { "givenName": "Takayuki", "surname": "Itoh", "fullName": "Takayuki Itoh", "affiliation": "Ochanomizu University", "__typename": "ArticleAuthorType" }, { "givenName": "Jessica Q.", "surname": "Dawson", "fullName": "Jessica Q. 
Dawson", "affiliation": "University of British Columbia", "__typename": "ArticleAuthorType" }, { "givenName": "Tamara", "surname": "Munzner", "fullName": "Tamara Munzner", "affiliation": "University of British Columbia", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2180-2191", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2015/6879/0/07156354", "title": "Attribute-driven edge bundling for general graphs with applications in trail analysis", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156354/12OmNCaLEnG", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csit/2016/8914/0/07549476", "title": "Application domain and programming language readability yardsticks", "doi": null, "abstractUrl": "/proceedings-article/csit/2016/07549476/12OmNCfjev8", "parentPublication": { "id": "proceedings/csit/2016/8914/0", "title": "2016 7th International Conference on Computer Science and Information Technology (CSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2011/935/0/05742389", "title": "Multilevel agglomerative edge bundling for visualizing large graphs", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2011/05742389/12OmNxj233Y", "parentPublication": { "id": "proceedings/pacificvis/2011/935/0", "title": "2011 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2012/4771/0/4771a093", "title": "Clutter 
Reduction in Multi-dimensional Visualization of Incomplete Data Using Sugiyama Algorithm", "doi": null, "abstractUrl": "/proceedings-article/iv/2012/4771a093/12OmNzBOhHa", "parentPublication": { "id": "proceedings/iv/2012/4771/0", "title": "2012 16th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2010/04/tts2010040546", "title": "Learning a Metric for Code Readability", "doi": null, "abstractUrl": "/journal/ts/2010/04/tts2010040546/13rRUygT7gV", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192724", "title": "AmbiguityVis: Visualization of Ambiguity in Graph Layouts", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192724/13rRUyuegpa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpc/2022/9298/0/929800a214", "title": "An Empirical Investigation on the Trade-off between Smart Contract Readability and Gas Consumption", "doi": null, "abstractUrl": "/proceedings-article/icpc/2022/929800a214/1EpKH3lfMRO", "parentPublication": { "id": "proceedings/icpc/2022/9298/0", "title": "2022 IEEE/ACM 30th International Conference on Program Comprehension (ICPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnisc/2022/5351/0/535100a155", "title": "HED-CNN based Ionospheric Clutter Extraction for HF Range-Doppler Spectrum", "doi": null, "abstractUrl": "/proceedings-article/icnisc/2022/535100a155/1KYtoZqU3de", "parentPublication": { "id": "proceedings/icnisc/2022/5351/0", "title": "2022 8th Annual International Conference on Network and Information Systems for Computers (ICNISC)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2019/04/08739137", "title": "Evaluating the Readability of Force Directed Graph Layouts: A Deep Learning Approach", "doi": null, "abstractUrl": "/magazine/cg/2019/04/08739137/1aXM6mNkouI", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08977320", "articleId": "1h2AIkwYg4E", "__typename": "AdjacentArticleType" }, "next": { "fno": "08977377", "articleId": "1h2AJ4jdnFK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1h2AJ4jdnFK", "doi": "10.1109/TVCG.2020.2970525", "abstract": "The development of usable visualization solutions is essential for ensuring both their adoption and effectiveness. User-centered design principles, which involve users throughout the entire development process, have been shown to be effective in numerous information visualization endeavors. We describe how we applied these principles in scientific visualization over a two year collaboration to develop a hybrid in situ/post hoc solution tailored towards combustion researcher needs. Furthermore, we examine the importance of user-centered design and lessons learned over the design process in an effort to aid others seeking to develop effective scientific visualization solutions.", "abstracts": [ { "abstractType": "Regular", "content": "The development of usable visualization solutions is essential for ensuring both their adoption and effectiveness. User-centered design principles, which involve users throughout the entire development process, have been shown to be effective in numerous information visualization endeavors. We describe how we applied these principles in scientific visualization over a two year collaboration to develop a hybrid in situ/post hoc solution tailored towards combustion researcher needs. Furthermore, we examine the importance of user-centered design and lessons learned over the design process in an effort to aid others seeking to develop effective scientific visualization solutions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The development of usable visualization solutions is essential for ensuring both their adoption and effectiveness. 
User-centered design principles, which involve users throughout the entire development process, have been shown to be effective in numerous information visualization endeavors. We describe how we applied these principles in scientific visualization over a two year collaboration to develop a hybrid in situ/post hoc solution tailored towards combustion researcher needs. Furthermore, we examine the importance of user-centered design and lessons learned over the design process in an effort to aid others seeking to develop effective scientific visualization solutions.", "title": "A User-Centered Design Study in Scientific Visualization Targeting Domain Experts", "normalizedTitle": "A User-Centered Design Study in Scientific Visualization Targeting Domain Experts", "fno": "08977377", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "User Centred Design", "User Centered Design Principles", "Information Visualization", "User Centered Design Study", "Domain Experts", "Visualization Solution", "Data Visualization", "User Centered Design", "Visualization", "Usability", "Task Analysis", "Collaboration", "Combustion", "Italic Xmlns Ali Http Www Niso Org Schemas Ali 1 0 Xmlns Mml Http Www W 3 Org 1998 Math Math ML Xmlns Xlink Http Www W 3 Org 1999 Xlink Xmlns Xsi Http Www W 3 Org 2001 XML Schema Instance In Situ Italic Data Visualization", "Usability Studies", "Design Studies", "Qualitative Evaluation", "User Interfaces" ], "authors": [ { "givenName": "Yucong", "surname": "Ye", "fullName": "Yucong Ye", "affiliation": "Department of Computer Science, University of California, Davis", "__typename": "ArticleAuthorType" }, { "givenName": "Franz", "surname": "Sauer", "fullName": "Franz Sauer", "affiliation": "Department of Computer Science, University of California, Davis", "__typename": "ArticleAuthorType" }, { "givenName": "Kwan-Liu", "surname": "Ma", "fullName": "Kwan-Liu Ma", "affiliation": "Department of Computer Science, University of California, Davis", 
"__typename": "ArticleAuthorType" }, { "givenName": "Konduri", "surname": "Aditya", "fullName": "Konduri Aditya", "affiliation": "Combustion Research FacilitySandia National Laboratories", "__typename": "ArticleAuthorType" }, { "givenName": "Jacqueline", "surname": "Chen", "fullName": "Jacqueline Chen", "affiliation": "Combustion Research FacilitySandia National Laboratories", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2192-2203", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/1999/0093/0/00930096", "title": "User-Centered Design and Evaluation of a Real-Time Battlefield Visualization Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/vr/1999/00930096/12OmNA2cYEt", "parentPublication": { "id": "proceedings/vr/1999/0093/0", "title": "Proceedings of Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vizsec/2015/7599/0/07312771", "title": "Unlocking user-centered design methods for building cyber security visualizations", "doi": null, "abstractUrl": "/proceedings-article/vizsec/2015/07312771/12OmNAWH9Ev", "parentPublication": { "id": "proceedings/vizsec/2015/7599/0", "title": "2015 IEEE Symposium on Visualization for Cyber Security (VizSec)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/srii/2012/4770/0/4770a697", "title": "User Centered Design of Innovative E-Service Solutions - A Scientific Approach to User Fascination", "doi": null, "abstractUrl": "/proceedings-article/srii/2012/4770a697/12OmNCvLY08", "parentPublication": { "id": "proceedings/srii/2012/4770/0", "title": "Annual SRII Global Conference", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vizsec/2005/9477/0/01532062", "title": "A user-centered look at glyph-based security visualization", "doi": null, "abstractUrl": "/proceedings-article/vizsec/2005/01532062/12OmNxR5UPi", "parentPublication": { "id": "proceedings/vizsec/2005/9477/0", "title": "IEEE Workshop on Visualization for Computer Security 2005 (VizSEC 05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2011/0868/0/06004027", "title": "Developing and Applying a User-Centered Model for the Design and Implementation of Information Visualization Tools", "doi": null, "abstractUrl": "/proceedings-article/iv/2011/06004027/12OmNyQph8m", "parentPublication": { "id": "proceedings/iv/2011/0868/0", "title": "2011 15th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/e-science/2014/4288/1/06972276", "title": "Experiences with User-Centered Design for the Tigres Workflow API", "doi": null, "abstractUrl": "/proceedings-article/e-science/2014/06972276/12OmNzzP5Hq", "parentPublication": { "id": "proceedings/e-science/2014/4288/1", "title": "2014 IEEE 10th International Conference on e-Science (e-Science)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017610", "title": "Activity-Centered Domain Characterization for Problem-Driven Scientific Visualization", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017610/13rRUwhHcQX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/2009/01/mso2009010096", "title": "Usability and User-Centered Design in Scientific Software Development", "doi": null, "abstractUrl": "/magazine/so/2009/01/mso2009010096/13rRUwvT9eM", 
"parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a336", "title": "User-Centered Design and Evaluation of ARTTS: an Augmented Reality Triage Tool Suite for Mass Casualty Incidents", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a336/1JrR3eLmZX2", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/works/2022/5191/0/519100a019", "title": "A Domain-Specific Composition Environment for Provenance Query of Scientific Workflows", "doi": null, "abstractUrl": "/proceedings-article/works/2022/519100a019/1KckqxKZTUY", "parentPublication": { "id": "proceedings/works/2022/5191/0", "title": "2022 IEEE/ACM Workshop on Workflows in Support of Large-Scale Science (WORKS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08977505", "articleId": "1h2AIHeB46A", "__typename": "AdjacentArticleType" }, "next": { "fno": "08567954", "articleId": "17D45XDIXXS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45XDIXXS", "doi": "10.1109/TVCG.2018.2885750", "abstract": "An importance measure of 3D objects inspired by human perception has a range of applications since people want computers to behave like humans in many tasks. This paper revisits a well-defined measure, distinction of 3D surface mesh, which indicates how important a region of a mesh is with respect to classification. We develop a method to compute it based on a classification network and a Markov Random Field (MRF). The classification network learns view-based distinction by handling multiple views of a 3D object. Using a classification network has an advantage of avoiding the training data problem which has become a major obstacle of applying deep learning to 3D object understanding tasks. The MRF estimates the parameters of a linear model for combining the view-based distinction maps. The experiments using several publicly accessible datasets show that the distinctive regions detected by our method are not just significantly different from those detected by methods based on handcrafted features, but more consistent with human perception. We also compare it with other perceptual measures and quantitatively evaluate its performance in the context of two applications. Furthermore, due to the view-based nature of our method, we are able to easily extend mesh distinction to 3D scenes containing multiple objects.", "abstracts": [ { "abstractType": "Regular", "content": "An importance measure of 3D objects inspired by human perception has a range of applications since people want computers to behave like humans in many tasks. 
This paper revisits a well-defined measure, distinction of 3D surface mesh, which indicates how important a region of a mesh is with respect to classification. We develop a method to compute it based on a classification network and a Markov Random Field (MRF). The classification network learns view-based distinction by handling multiple views of a 3D object. Using a classification network has an advantage of avoiding the training data problem which has become a major obstacle of applying deep learning to 3D object understanding tasks. The MRF estimates the parameters of a linear model for combining the view-based distinction maps. The experiments using several publicly accessible datasets show that the distinctive regions detected by our method are not just significantly different from those detected by methods based on handcrafted features, but more consistent with human perception. We also compare it with other perceptual measures and quantitatively evaluate its performance in the context of two applications. Furthermore, due to the view-based nature of our method, we are able to easily extend mesh distinction to 3D scenes containing multiple objects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An importance measure of 3D objects inspired by human perception has a range of applications since people want computers to behave like humans in many tasks. This paper revisits a well-defined measure, distinction of 3D surface mesh, which indicates how important a region of a mesh is with respect to classification. We develop a method to compute it based on a classification network and a Markov Random Field (MRF). The classification network learns view-based distinction by handling multiple views of a 3D object. Using a classification network has an advantage of avoiding the training data problem which has become a major obstacle of applying deep learning to 3D object understanding tasks. 
The MRF estimates the parameters of a linear model for combining the view-based distinction maps. The experiments using several publicly accessible datasets show that the distinctive regions detected by our method are not just significantly different from those detected by methods based on handcrafted features, but more consistent with human perception. We also compare it with other perceptual measures and quantitatively evaluate its performance in the context of two applications. Furthermore, due to the view-based nature of our method, we are able to easily extend mesh distinction to 3D scenes containing multiple objects.", "title": "Distinction of 3D Objects and Scenes via Classification Network and Markov Random Field", "normalizedTitle": "Distinction of 3D Objects and Scenes via Classification Network and Markov Random Field", "fno": "08567954", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Feature Extraction", "Image Classification", "Image Representation", "Learning Artificial Intelligence", "Markov Processes", "Neural Nets", "Object Detection", "Object Recognition", "Multiple Objects", "Mesh Distinction", "View Based Nature", "Distinctive Regions", "View Based Distinction Maps", "3 D Object Understanding Tasks", "Markov Random Field", "Classification Network", "3 D Surface Mesh", "Human Perception", "Three Dimensional Displays", "Task Analysis", "Shape", "Two Dimensional Displays", "Feature Extraction", "Training", "Markov Random Fields", "3 D Mesh", "Distinction", "Neural Network", "Markov Random Field" ], "authors": [ { "givenName": "Ran", "surname": "Song", "fullName": "Ran Song", "affiliation": "Centre for Secure, Intelligent and Usable Systems, School of Computing, Engineering and Mathematics, University of Brighton, Brighton, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Yonghuai", "surname": "Liu", "fullName": "Yonghuai Liu", "affiliation": "Department of Computer Science, Edge Hill University, Ormskirk, United Kingdom", 
"__typename": "ArticleAuthorType" }, { "givenName": "Paul L.", "surname": "Rosin", "fullName": "Paul L. Rosin", "affiliation": "School of Computer Science and Informatics, Cardiff University, Cardiff, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2204-2218", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2014/5209/0/5209a930", "title": "Fusion of Image Segmentations under Markov, Random Fields", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a930/12OmNBUAvZ9", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/camp/1997/7987/0/79870220", "title": "Circuital Markov random fields for analog edge detection", "doi": null, "abstractUrl": "/proceedings-article/camp/1997/79870220/12OmNCdBDXt", "parentPublication": { "id": "proceedings/camp/1997/7987/0", "title": "Computer Architectures for Machine Perception, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fskd/2009/3735/5/3735e442", "title": "SAR Image Segmentation Based on Markov Random Field Model and Multiscale Technology", "doi": null, "abstractUrl": "/proceedings-article/fskd/2009/3735e442/12OmNxZkhti", "parentPublication": { "id": "proceedings/fskd/2009/3735/5", "title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2007/2822/1/04378735", "title": "Text/Non-text Ink Stroke Classification in 
Japanese Handwriting Based on Markov Random Fields", "doi": null, "abstractUrl": "/proceedings-article/icdar/2007/04378735/12OmNxbEtLz", "parentPublication": { "id": "proceedings/icdar/2007/2822/1", "title": "Ninth International Conference on Document Analysis and Recognition (ICDAR 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2002/1695/4/169540201", "title": "Face Detection and Synthesis Using Markov Random Field Models", "doi": null, "abstractUrl": "/proceedings-article/icpr/2002/169540201/12OmNyfdOX0", "parentPublication": { "id": "proceedings/icpr/2002/1695/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2004/8484/4/01326913", "title": "On iterative source-channel image decoding with Markov random field source models", "doi": null, "abstractUrl": "/proceedings-article/icassp/2004/01326913/12OmNzUgdgz", "parentPublication": { "id": "proceedings/icassp/2004/8484/4", "title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1994/6952/2/00413557", "title": "Segmentation of range and intensity images using multiscale Markov random field representations", "doi": null, "abstractUrl": "/proceedings-article/icip/1994/00413557/12OmNzcxZhC", "parentPublication": { "id": "proceedings/icip/1994/6952/2", "title": "Proceedings of 1st International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/cc/2019/03/07872428", "title": "A Markov Random Field Based Approach for Analyzing Supercomputer System Logs", "doi": null, "abstractUrl": "/journal/cc/2019/03/07872428/13rRUwh80Jx", "parentPublication": { "id": "trans/cc", "title": "IEEE Transactions on Cloud Computing", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2006/11/i1830", "title": "Dense Photometric Stereo: A Markov Random Field Approach", "doi": null, "abstractUrl": "/journal/tp/2006/11/i1830/13rRUygT7tT", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a662", "title": "A Data-Driven Prior on Facet Orientation for Semantic Mesh Labeling", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a662/17D45WgziON", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08977377", "articleId": "1h2AJ4jdnFK", "__typename": "AdjacentArticleType" }, "next": { "fno": "08546802", "articleId": "17D45WrVg2d", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WrVg2d", "doi": "10.1109/TVCG.2018.2883630", "abstract": "Tracking the temporal evolution of features in time-varying data is a key method in visualization. For typical feature definitions, such as vortices, objects are sparsely distributed over the data domain. In this paper, we present a novel approach for tracking both sparse and space-filling features. While the former comprise only a small fraction of the domain, the latter form a set of objects whose union covers the domain entirely while the individual objects are mutually disjunct. Our approach determines the assignment of features between two successive time-steps by solving two graph optimization problems. It first resolves one-to-one assignments of features by computing a maximum-weight, maximum-cardinality matching on a weighted bi-partite graph. Second, our algorithm detects events by creating a graph of potentially conflicting event explanations and finding a weighted, independent set in it. We demonstrate our method's effectiveness on synthetic and simulation data sets, the former of which enables quantitative evaluation because of the availability of ground-truth information. Here, our method performs on par or better than a well-established reference algorithm. In addition, manual visual inspection by our collaborators confirm the results' plausibility for simulation data.", "abstracts": [ { "abstractType": "Regular", "content": "Tracking the temporal evolution of features in time-varying data is a key method in visualization. For typical feature definitions, such as vortices, objects are sparsely distributed over the data domain. 
In this paper, we present a novel approach for tracking both sparse and space-filling features. While the former comprise only a small fraction of the domain, the latter form a set of objects whose union covers the domain entirely while the individual objects are mutually disjunct. Our approach determines the assignment of features between two successive time-steps by solving two graph optimization problems. It first resolves one-to-one assignments of features by computing a maximum-weight, maximum-cardinality matching on a weighted bi-partite graph. Second, our algorithm detects events by creating a graph of potentially conflicting event explanations and finding a weighted, independent set in it. We demonstrate our method's effectiveness on synthetic and simulation data sets, the former of which enables quantitative evaluation because of the availability of ground-truth information. Here, our method performs on par or better than a well-established reference algorithm. In addition, manual visual inspection by our collaborators confirm the results' plausibility for simulation data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Tracking the temporal evolution of features in time-varying data is a key method in visualization. For typical feature definitions, such as vortices, objects are sparsely distributed over the data domain. In this paper, we present a novel approach for tracking both sparse and space-filling features. While the former comprise only a small fraction of the domain, the latter form a set of objects whose union covers the domain entirely while the individual objects are mutually disjunct. Our approach determines the assignment of features between two successive time-steps by solving two graph optimization problems. It first resolves one-to-one assignments of features by computing a maximum-weight, maximum-cardinality matching on a weighted bi-partite graph. 
Second, our algorithm detects events by creating a graph of potentially conflicting event explanations and finding a weighted, independent set in it. We demonstrate our method's effectiveness on synthetic and simulation data sets, the former of which enables quantitative evaluation because of the availability of ground-truth information. Here, our method performs on par or better than a well-established reference algorithm. In addition, manual visual inspection by our collaborators confirm the results' plausibility for simulation data.", "title": "Feature Tracking by Two-Step Optimization", "normalizedTitle": "Feature Tracking by Two-Step Optimization", "fno": "08546802", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Complexity", "Data Visualisation", "Feature Extraction", "Graph Theory", "Optimisation", "Two Step Optimization", "Time Varying Data", "Feature Definition", "Data Domain", "Sparse Space Filling Features", "Graph Optimization Problems", "Maximum Weight Matching", "Maximum Cardinality Matching", "Weighted Bi Partite Graph", "Feature Tracking", "Feature Extraction", "Target Tracking", "Optimization", "Data Visualization", "Data Models", "Analytical Models", "Heuristic Algorithms", "Global Optimization", "Simulation Output Analysis", "Flow Visualization" ], "authors": [ { "givenName": "Andrea", "surname": "Schnorr", "fullName": "Andrea Schnorr", "affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Dirk", "surname": "N. Helmrich", "fullName": "Dirk N. 
Helmrich", "affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Dominik", "surname": "Denker", "fullName": "Dominik Denker", "affiliation": "Institute for Combustion Technology, RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Torsten W.", "surname": "Kuhlen", "fullName": "Torsten W. Kuhlen", "affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Bernd", "surname": "Hentschel", "fullName": "Bernd Hentschel", "affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2219-2233", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/visapp/2014/8133/3/07295096", "title": "Feature matching using CO-inertia analysis for people tracking", "doi": null, "abstractUrl": "/proceedings-article/visapp/2014/07295096/12OmNvsm6zh", "parentPublication": { "id": "proceedings/visapp/2014/8133/2", "title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109d607", "title": "Optimization of Target Objects for Natural Feature Tracking", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109d607/12OmNyv7m5x", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2015/9711/0/5720a751", "title": "Attributed Graphs for Tracking Multiple Objects in Structured Sports Videos", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a751/12OmNzVoBNd", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209d678", "title": "Unsupervised Tracking from Clustered Graph Patterns", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209d678/12OmNzwHvrO", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vspets/2005/9424/0/01570894", "title": "Object tracking with dynamic feature graph", "doi": null, "abstractUrl": "/proceedings-article/vspets/2005/01570894/12OmNzwpUhP", "parentPublication": { "id": "proceedings/vspets/2005/9424/0", "title": "Proceedings. 
2nd Joint IEEE International Workshop on Visual Surveillance and Performance Evaluation of Tracking and Surveillance (VS-PETS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a007", "title": "Learning Deep Appearance Feature for Multi-target Tracking", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a007/1ap5AZ64kLK", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200b774", "title": "Robust Deep Tracking with Two-step Augmentation Discriminative Correlation Filters", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b774/1cdOHHPL6V2", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2021/02/09311214", "title": "A Confidence-Guided Technique for Tracking Time-Varying Features", "doi": null, "abstractUrl": "/magazine/cs/2021/02/09311214/1pYWIN9JCTe", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413110", "title": "Robust Visual Object Tracking with Two-Stream Residual Convolutional Networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413110/1tmjzhcSj28", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieit/2021/2563/0/256300a244", "title": "Object tracking algorithm based on fusion of SiamFC 
and Feature Pyramid Network", "doi": null, "abstractUrl": "/proceedings-article/ieit/2021/256300a244/1wHKqvHr7mo", "parentPublication": { "id": "proceedings/ieit/2021/2563/0", "title": "2021 International Conference on Internet, Education and Information Technology (IEIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08567954", "articleId": "17D45XDIXXS", "__typename": "AdjacentArticleType" }, "next": { "fno": "08573859", "articleId": "17D45We0UEn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45We0UEn", "doi": "10.1109/TVCG.2018.2886322", "abstract": "This paper extends the recently proposed power-particle-based fluid simulation method with staggered discretization, GPU implementation, and adaptive sampling, largely enhancing the efficiency and usability of the method. In contrast to the original formulation which uses co-located pressures and velocities, in this paper, a staggered scheme is adapted to the Power Particles to benefit visual details and computing efficiency. Meanwhile, we propose a novel facet-based power diagrams construction algorithm suitable for parallelization and explore its GPU implementation, achieving an order of magnitude boost in performance over the existing code library. In addition, to utilize the potential of Power Particles to control individual cell volume, we apply adaptive particle sampling to improve the detail level with varying resolution. The proposed method can be entirely carried out on GPUs, and our extensive experiments validate our method both in terms of efficiency and visual quality.", "abstracts": [ { "abstractType": "Regular", "content": "This paper extends the recently proposed power-particle-based fluid simulation method with staggered discretization, GPU implementation, and adaptive sampling, largely enhancing the efficiency and usability of the method. In contrast to the original formulation which uses co-located pressures and velocities, in this paper, a staggered scheme is adapted to the Power Particles to benefit visual details and computing efficiency. 
Meanwhile, we propose a novel facet-based power diagrams construction algorithm suitable for parallelization and explore its GPU implementation, achieving an order of magnitude boost in performance over the existing code library. In addition, to utilize the potential of Power Particles to control individual cell volume, we apply adaptive particle sampling to improve the detail level with varying resolution. The proposed method can be entirely carried out on GPUs, and our extensive experiments validate our method both in terms of efficiency and visual quality.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper extends the recently proposed power-particle-based fluid simulation method with staggered discretization, GPU implementation, and adaptive sampling, largely enhancing the efficiency and usability of the method. In contrast to the original formulation which uses co-located pressures and velocities, in this paper, a staggered scheme is adapted to the Power Particles to benefit visual details and computing efficiency. Meanwhile, we propose a novel facet-based power diagrams construction algorithm suitable for parallelization and explore its GPU implementation, achieving an order of magnitude boost in performance over the existing code library. In addition, to utilize the potential of Power Particles to control individual cell volume, we apply adaptive particle sampling to improve the detail level with varying resolution. 
The proposed method can be entirely carried out on GPUs, and our extensive experiments validate our method both in terms of efficiency and visual quality.", "title": "Fluid Simulation with Adaptive Staggered Power Particles on GPUs", "normalizedTitle": "Fluid Simulation with Adaptive Staggered Power Particles on GPUs", "fno": "08573859", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Fluid Dynamics", "Computer Graphics", "Flow Simulation", "Graphics Processing Units", "Sampling Methods", "Adaptive Staggered Power Particles", "GPU", "Staggered Discretization", "Adaptive Particle Sampling", "Power Particle Based Fluid Simulation", "Facet Based Power Diagrams Construction Algorithm", "Visual Quality", "Visualization", "Adaptation Models", "Computational Modeling", "Graphics Processing Units", "Libraries", "Liquids", "Physically Based Modeling", "Fluid Simulation", "Power Diagrams", "GPU Parallelization", "Adaptive Sampling" ], "authors": [ { "givenName": "Xiao", "surname": "Zhai", "fullName": "Xiao Zhai", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Fei", "surname": "Hou", "fullName": "Fei Hou", "affiliation": "State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hong", "surname": "Qin", "fullName": "Hong Qin", "affiliation": "Department of Computer Science, Stony Brook University, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Aimin", "surname": "Hao", "fullName": "Aimin Hao", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": 
"trans", "pages": "2234-2246", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cw/2015/9403/0/9403a375", "title": "A Particle-Based Real-Time CG Rendering of Carbonated Water with Automatic Release of Bubbles", "doi": null, "abstractUrl": "/proceedings-article/cw/2015/9403a375/12OmNA14Ach", "parentPublication": { "id": "proceedings/cw/2015/9403/0", "title": "2015 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1992/2897/0/00235226", "title": "Rendering surface-particles", "doi": null, "abstractUrl": "/proceedings-article/visual/1992/00235226/12OmNz61dc1", "parentPublication": { "id": "proceedings/visual/1992/2897/0", "title": "Proceedings Visualization '92", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/e-science/2016/4273/0/07870923", "title": "A fast algorithm for neutrally-buoyant Lagrangian particles in numerical ocean modeling", "doi": null, "abstractUrl": "/proceedings-article/e-science/2016/07870923/12OmNzUxObB", "parentPublication": { "id": "proceedings/e-science/2016/4273/0", "title": "2016 IEEE 12th International Conference on e-Science (e-Science)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2014/5500/0/5500a054", "title": "24.77 Pflops on a Gravitational Tree-Code to Simulate the Milky Way Galaxy with 18600 GPUs", "doi": null, "abstractUrl": "/proceedings-article/sc/2014/5500a054/12OmNzb7Zu7", "parentPublication": { "id": "proceedings/sc/2014/5500/0", "title": "SC14: International Conference for High Performance Computing, Networking, Storage and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candar/2017/2087/0/2087a132", "title": "SPH-based Fluid Simulation on 
GPU Using Verlet List and Subdivided Cell-Linked List", "doi": null, "abstractUrl": "/proceedings-article/candar/2017/2087a132/12OmNzdoMHd", "parentPublication": { "id": "proceedings/candar/2017/2087/0", "title": "2017 Fifth International Symposium on Computing and Networking (CANDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icppw/2015/7589/0/7589a081", "title": "A Special Sorting Method for Neighbor Search Procedure in Smoothed Particle Hydrodynamics on GPUs", "doi": null, "abstractUrl": "/proceedings-article/icppw/2015/7589a081/12OmNzxPTGh", "parentPublication": { "id": "proceedings/icppw/2015/7589/0", "title": "2015 44th International Conference on Parallel Processing Workshops (ICPPW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/08/07243356", "title": "Fast Coherent Particle Advection through Time-Varying Unstructured Flow Datasets", "doi": null, "abstractUrl": "/journal/tg/2016/08/07243356/13rRUx0xPIN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2005/03/c3039", "title": "A Seamless Approach to Multiscale Complex Fluid Simulation", "doi": null, "abstractUrl": "/magazine/cs/2005/03/c3039/13rRUxbTMt3", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdiime/2022/9009/0/900900a048", "title": "Solid-Fluid Interaction Simulation System Based on SPH Unified Particle Framework", "doi": null, "abstractUrl": "/proceedings-article/icdiime/2022/900900a048/1Iz56eSpj3y", "parentPublication": { "id": "proceedings/icdiime/2022/9009/0", "title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candar/2020/8221/0/822100a195", "title": "Exploiting temporal parallelism in particle-based incompressive fluid simulation on FPGA", "doi": null, "abstractUrl": "/proceedings-article/candar/2020/822100a195/1sA9a0wFBIc", "parentPublication": { "id": "proceedings/candar/2020/8221/0", "title": "2020 Eighth International Symposium on Computing and Networking (CANDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08546802", "articleId": "17D45WrVg2d", "__typename": "AdjacentArticleType" }, "next": { "fno": "08565948", "articleId": "17D45Wda7ec", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1jx1mmLVkpW", "name": "ttg202006-08573859s1-tvcg_video.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08573859s1-tvcg_video.mp4", "extension": "mp4", "size": "47.7 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45Wda7ec", "doi": "10.1109/TVCG.2018.2884940", "abstract": "User interaction has the potential to greatly facilitate the exploration and understanding of 3D medical images for diagnosis and treatment. However, in certain specialized environments such as in an operating room (OR), technical and physical constraints such as the need to enforce strict sterility rules, make interaction challenging. In this paper, we propose to facilitate the intraoperative exploration of angiographic volumes by leveraging the motion of a tracked surgical pointer, a tool that is already manipulated by the surgeon when using a navigation system in the OR. We designed and implemented three interactive rendering techniques based on this principle. The benefit of each of these techniques is compared to its non-interactive counterpart in a psychophysics experiment where 20 medical imaging experts were asked to perform a reaching/targeting task while visualizing a 3D volume of angiographic data. The study showed a significant improvement of the appreciation of local vascular structure when using dynamic techniques, while not having a negative impact on the appreciation of the global structure and only a marginal impact on the execution speed. A qualitative evaluation of the different techniques showed a preference for dynamic chroma-depth in accordance with the objective metrics but a discrepancy between objective and subjective measures for dynamic aerial perspective and shading.", "abstracts": [ { "abstractType": "Regular", "content": "User interaction has the potential to greatly facilitate the exploration and understanding of 3D medical images for diagnosis and treatment. 
However, in certain specialized environments such as in an operating room (OR), technical and physical constraints such as the need to enforce strict sterility rules, make interaction challenging. In this paper, we propose to facilitate the intraoperative exploration of angiographic volumes by leveraging the motion of a tracked surgical pointer, a tool that is already manipulated by the surgeon when using a navigation system in the OR. We designed and implemented three interactive rendering techniques based on this principle. The benefit of each of these techniques is compared to its non-interactive counterpart in a psychophysics experiment where 20 medical imaging experts were asked to perform a reaching/targeting task while visualizing a 3D volume of angiographic data. The study showed a significant improvement of the appreciation of local vascular structure when using dynamic techniques, while not having a negative impact on the appreciation of the global structure and only a marginal impact on the execution speed. A qualitative evaluation of the different techniques showed a preference for dynamic chroma-depth in accordance with the objective metrics but a discrepancy between objective and subjective measures for dynamic aerial perspective and shading.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "User interaction has the potential to greatly facilitate the exploration and understanding of 3D medical images for diagnosis and treatment. However, in certain specialized environments such as in an operating room (OR), technical and physical constraints such as the need to enforce strict sterility rules, make interaction challenging. In this paper, we propose to facilitate the intraoperative exploration of angiographic volumes by leveraging the motion of a tracked surgical pointer, a tool that is already manipulated by the surgeon when using a navigation system in the OR. 
We designed and implemented three interactive rendering techniques based on this principle. The benefit of each of these techniques is compared to its non-interactive counterpart in a psychophysics experiment where 20 medical imaging experts were asked to perform a reaching/targeting task while visualizing a 3D volume of angiographic data. The study showed a significant improvement of the appreciation of local vascular structure when using dynamic techniques, while not having a negative impact on the appreciation of the global structure and only a marginal impact on the execution speed. A qualitative evaluation of the different techniques showed a preference for dynamic chroma-depth in accordance with the objective metrics but a discrepancy between objective and subjective measures for dynamic aerial perspective and shading.", "title": "Interaction Driven Enhancement of Depth Perception in Angiographic Volumes", "normalizedTitle": "Interaction Driven Enhancement of Depth Perception in Angiographic Volumes", "fno": "08565948", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Biomedical MRI", "Blood Vessels", "Data Visualisation", "Interactive Systems", "Medical Image Processing", "Rendering Computer Graphics", "Surgery", "Navigation System", "Interactive Rendering Techniques", "Noninteractive Counterpart", "Psychophysics Experiment", "20 Medical Imaging Experts", "Angiographic Data", "Appreciation", "Local Vascular Structure", "Dynamic Chroma Depth", "Interaction Driven Enhancement", "Depth Perception", "Angiographic Volumes", "User Interaction", "3 D Medical Images", "Specialized Environments", "Intraoperative Exploration", "Tracked Surgical Pointer", "Sterility Rules", "Surgery", "Rendering Computer Graphics", "Three Dimensional Displays", "Tracking", "Biomedical Imaging", "Tools", "Navigation", "Image Guided Surgery", "Volume Visualization", "Interaction Techniques", "Depth Cues", "Evaluation", "Angiography" ], "authors": [ { "givenName": "Simon", "surname": 
"Drouin", "fullName": "Simon Drouin", "affiliation": "Department of Biomedical Engineering, McConnell Brain Imaging Center, Montreal Neurological Institute, McGill University, Montreal, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel A. Di", "surname": "Giovanni", "fullName": "Daniel A. Di Giovanni", "affiliation": "Department of Biomedical Engineering, McConnell Brain Imaging Center, Montreal Neurological Institute, McGill University, Montreal, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Marta", "surname": "Kersten-Oertel", "fullName": "Marta Kersten-Oertel", "affiliation": "Department of Computer Science and Software Engineering, Concordia University, Montreal, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "D. Louis", "surname": "Collins", "fullName": "D. Louis Collins", "affiliation": "Department of Biomedical Engineering, McConnell Brain Imaging Center, Montreal Neurological Institute, McGill University, Montreal, Canada", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2247-2257", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cis/2009/3931/1/3931a160", "title": "Extracting the Coronary Artery in Angiographic Image Based on à Trous Wavelet of Rotary Gaussian with Adaptive Space Coefficient", "doi": null, "abstractUrl": "/proceedings-article/cis/2009/3931a160/12OmNAGNCaX", "parentPublication": { "id": "proceedings/cis/2009/3931/1", "title": "2009 International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/1996/7441/0/00507125", "title": "The Fourier adaptive smoothness constraint for computing optical flow on 
sequences of angiographic images", "doi": null, "abstractUrl": "/proceedings-article/cbms/1996/00507125/12OmNrHB1Vm", "parentPublication": { "id": "proceedings/cbms/1996/7441/0", "title": "Proceedings Ninth IEEE Symposium on Computer-Based Medical Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/1996/7441/0/00507121", "title": "A method for automatically detecting the systole and diastole phases in sequences of angiographic images", "doi": null, "abstractUrl": "/proceedings-article/cbms/1996/00507121/12OmNwFid1h", "parentPublication": { "id": "proceedings/cbms/1996/7441/0", "title": "Proceedings Ninth IEEE Symposium on Computer-Based Medical Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vbc/1990/2039/0/00109321", "title": "Coronary vasculature visualization from limited angiographic views", "doi": null, "abstractUrl": "/proceedings-article/vbc/1990/00109321/12OmNxy4N2N", "parentPublication": { "id": "proceedings/vbc/1990/2039/0", "title": "[1990] Proceedings of the First Conference on Visualization in Biomedical Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cic/1989/2114/0/00130584", "title": "Computation of functional angiographic images with the Hartley transform", "doi": null, "abstractUrl": "/proceedings-article/cic/1989/00130584/12OmNy2ah21", "parentPublication": { "id": "proceedings/cic/1989/2114/0", "title": "Proceedings Computers in Cardiology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bmei/2008/3118/1/3118a341", "title": "Perception-aware Depth Cueing for Illustrative Vascular Visualization", "doi": null, "abstractUrl": "/proceedings-article/bmei/2008/3118a341/12OmNzvhvKm", "parentPublication": { "id": "proceedings/bmei/2008/3118/1", "title": "2008 International Conference on Biomedical Engineering and 
Informatics (BMEI 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1117", "title": "Enhancing Depth Perception in Translucent Volumes", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1117/13rRUygT7y1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2018/5488/0/08621540", "title": "Inter/Intra-Constraints Optimization for Fast Vessel Enhancement in X-ray Angiographic Image Sequence", "doi": null, "abstractUrl": "/proceedings-article/bibm/2018/08621540/17D45X0yjUO", "parentPublication": { "id": "proceedings/bibm/2018/5488/0", "title": "2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccgiv/2022/9250/0/925000a111", "title": "Coronary Artery Segmentation from X-ray Angiographic Images using Width-aware U-Net", "doi": null, "abstractUrl": "/proceedings-article/iccgiv/2022/925000a111/1LxfpGyhNcY", "parentPublication": { "id": "proceedings/iccgiv/2022/9250/0", "title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acit-csii-bcd/2017/3302/0/3302a190", "title": "Depth Recognition in 3D Translucent Stereoscopic Imaging of Medical Volumes by Means of a Glasses-Free 3D Display", "doi": null, "abstractUrl": "/proceedings-article/acit-csii-bcd/2017/3302a190/1cdOB3HCeTm", "parentPublication": { "id": "proceedings/acit-csii-bcd/2017/3302/0", "title": "2017 5th Intl Conf on Applied Computing and Information Technology/4th Intl Conf on Computational Science/Intelligence and Applied Informatics/2nd Intl Conf on Big Data, Cloud Computing, Data Science (ACIT-CSII-BCD)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08573859", "articleId": "17D45We0UEn", "__typename": "AdjacentArticleType" }, "next": { "fno": "08576679", "articleId": "17D45XreC6e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1js2Esj96ak", "name": "ttg202006-08565948s1-interactive_rendering_video.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08565948s1-interactive_rendering_video.mp4", "extension": "mp4", "size": "28.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45XreC6e", "doi": "10.1109/TVCG.2018.2886877", "abstract": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model.", "abstracts": [ { "abstractType": "Regular", "content": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. 
Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. 
Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model.", "title": "Perceptually Validated Cross-Renderer Analytical BRDF Parameter Remapping", "normalizedTitle": "Perceptually Validated Cross-Renderer Analytical BRDF Parameter Remapping", "fno": "08576679", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Image Processing", "Rendering Computer Graphics", "Solid Modelling", "Visually Faithful Remapping", "Remapped Renderings", "Software Packages", "Cross Renderer Analytical BRDF Parameter Remapping", "BRDF Remapping Technique", "BRDF Difference Probe", "Digital 3 D Prototyping", "Rendering Computer Graphics", "Computational Modeling", "Lighting", "Measurement", "Probes", "Visualization", "Optimization", "BRDF", "SVBRDF", "Perceptual Validation", "Virtual Materials", "Surface Perception", "Parameter Remapping" ], "authors": [ { "givenName": "Dar'ya", "surname": "Guarnera", "fullName": "Dar'ya Guarnera", "affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway", "__typename": "ArticleAuthorType" }, { "givenName": "Giuseppe Claudio", "surname": "Guarnera", "fullName": "Giuseppe Claudio Guarnera", "affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway", "__typename": "ArticleAuthorType" }, { "givenName": "Matteo", "surname": "Toscani", "fullName": "Matteo Toscani", "affiliation": "Psychology, Justus-Liebig-Universität Giessen, Giessen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Mashhuda", "surname": "Glencross", "fullName": "Mashhuda Glencross", "affiliation": "Pismo Software Ltd., Oxford, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Baihua", "surname": "Li", "fullName": "Baihua Li", "affiliation": "Computer Science, Loughborough University, 
Loughborough, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Jon Yngve", "surname": "Hardeberg", "fullName": "Jon Yngve Hardeberg", "affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway", "__typename": "ArticleAuthorType" }, { "givenName": "Karl R.", "surname": "Gegenfurtner", "fullName": "Karl R. Gegenfurtner", "affiliation": "Psychology, Justus-Liebig-Universität Giessen, Giessen, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2258-2272", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/wmsvm/2010/7077/0/05558360", "title": "Modeling and Editing Isotropic BRDF", "doi": null, "abstractUrl": "/proceedings-article/wmsvm/2010/05558360/12OmNARiM3T", "parentPublication": { "id": "proceedings/wmsvm/2010/7077/0", "title": "2010 Second International Conference on Modeling, Simulation and Visualization Methods (WMSVM 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209c047", "title": "Effective Acquisition of Dense Anisotropic BRDF", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209c047/12OmNqNXEsZ", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nswctc/2010/4011/1/4011a332", "title": "The Analysis of Global Illumination Rendering Based on BRDF", "doi": null, "abstractUrl": "/proceedings-article/nswctc/2010/4011a332/12OmNyvGynS", "parentPublication": { "id": "proceedings/nswctc/2010/4011/1", "title": "Networks Security, Wireless 
Communications and Trusted Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d559", "title": "A Gaussian Process Latent Variable Model for BRDF Inference", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d559/12OmNzVoBvI", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/11/ttg2012111824", "title": "Rational BRDF", "doi": null, "abstractUrl": "/journal/tg/2012/11/ttg2012111824/13rRUwjGoFZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09678000", "title": "Real-Time Lighting Estimation for Augmented Reality via Differentiable Screen-Space Rendering", "doi": null, "abstractUrl": "/journal/tg/2023/04/09678000/1A4SuYWCI7K", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f416", "title": "Neural Voxel Renderer: Learning an Accurate and Controllable Rendering Tool", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f416/1m3nYbnokEM", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/04/09203787", "title": "Learning-Based Inverse Bi-Scale Material Fitting From Tabular BRDFs", "doi": null, "abstractUrl": "/journal/tg/2022/04/09203787/1nkyY8W8j1m", "parentPublication": { "id": "trans/tg", "title": "IEEE 
Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09444888", "title": "Estimating Homogeneous Data-Driven BRDF Parameters From a Reflectance Map Under Known Natural Lighting", "doi": null, "abstractUrl": "/journal/tg/2022/12/09444888/1u51y8PQCMU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09623493", "title": "Invertible Neural BRDF for Object Inverse Rendering", "doi": null, "abstractUrl": "/journal/tp/2022/12/09623493/1yJT7tLzbi0", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08565948", "articleId": "17D45Wda7ec", "__typename": "AdjacentArticleType" }, "next": { "fno": "08554159", "articleId": "17D45WB0qbp", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1js2FLKT2a4", "name": "ttg202006-08576679s1-supplemental_material.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08576679s1-supplemental_material.pdf", "extension": "pdf", "size": "54.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WB0qbp", "doi": "10.1109/TVCG.2018.2884468", "abstract": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change.", "abstracts": [ { "abstractType": "Regular", "content": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. 
Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. 
We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change.", "title": "Scene Transitions and Teleportation in Virtual Reality and the Implications for Spatial Awareness and Sickness", "normalizedTitle": "Scene Transitions and Teleportation in Virtual Reality and the Implications for Spatial Awareness and Sickness", "fno": "08554159", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Animation", "Virtual Reality", "Automated Viewpoint Changes", "Head Tracked Virtual Reality", "Immersive Virtual Reality", "Scene Transitions", "Rotational Changes", "Translational Changes", "Viewpoint Transitions", "Rotational Viewpoint Changes", "Animated Technique", "Rotational Transitions", "Scene Changes", "Instant Teleportations", "Virtual Scene", "Spatial Awareness", "Visual Adjustments", "Pulsed Interpolation", "Animated Interpolation", "Teleportation", "Transition Techniques", "Interactive User Control", "Teleportation", "Three Dimensional Displays", "Legged Locomotion", "Tracking", "Space Exploration", "Motion Pictures", "Virtual Reality", "Animation", "Virtual Reality", "View Transitions", "Scene Transitions", "Travel", "Immersive Cinema", "3 D Movies", "Teleportation", "Navigation", "Sickness", "Spatial Orientation", "Spatial Awareness" ], "authors": [ { "givenName": "Kasra", "surname": "Moghadam", 
"fullName": "Kasra Moghadam", "affiliation": "Texas A&M University, College Station, TX, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Colin", "surname": "Banigan", "fullName": "Colin Banigan", "affiliation": "Texas A&M University, College Station, TX, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Eric D.", "surname": "Ragan", "fullName": "Eric D. Ragan", "affiliation": "University of Florida, Gainesville, FL, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2273-2287", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/robot/1991/2163/0/00131936", "title": "Biped gait transitions", "doi": null, "abstractUrl": "/proceedings-article/robot/1991/00131936/12OmNAS9zt7", "parentPublication": { "id": "proceedings/robot/1991/2163/0", "title": "Proceedings. 
1991 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892316", "title": "An exploration of input conditions for virtual teleportation", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892316/12OmNCzb9vr", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev/2016/1269/0/07760005", "title": "Random forests based recognition of human activities and postural transitions on smartphone", "doi": null, "abstractUrl": "/proceedings-article/iciev/2016/07760005/12OmNwtEEP6", "parentPublication": { "id": "proceedings/iciev/2016/1269/0", "title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892386", "title": "Travel in large-scale head-worn VR: Pre-oriented teleportation with WIMs and previews", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892386/12OmNzhELm6", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/09/08031015", "title": "A Vector Field Design Approach to Animated Transitions", "doi": null, "abstractUrl": "/journal/tg/2018/09/08031015/13rRUB7a117", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998297", "title": "Teleporting through virtual environments: Effects of path scale and environment scale on spatial updating", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998297/1hrXhk9mu9W", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090560", "title": "Either Give Me a Reason to Stand or an Opportunity to Sit in VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090560/1jIxzjmEoeY", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a608", "title": "Walking and Teleportation in Wide-area Virtual Reality Experiences", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a608/1pysv8bIfrG", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/09/09332290", "title": "Quantifiable Fine-Grain Occlusion Removal Assistance for Efficient VR Exploration", "doi": null, "abstractUrl": "/journal/tg/2022/09/09332290/1qzsRxXpW4o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a480", "title": "Analysis of Positional Tracking Space Usage when using Teleportation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a480/1tnXfrT4ere", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08576679", "articleId": 
"17D45XreC6e", "__typename": "AdjacentArticleType" }, "next": { "fno": "08554186", "articleId": "17D45WIXbPb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1js2Mf0SAgw", "name": "ttg202006-08554159s1-transitions-examples.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08554159s1-transitions-examples.mp4", "extension": "mp4", "size": "5.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WIXbPb", "doi": "10.1109/TVCG.2018.2883628", "abstract": "We introduce dynamically warping grids for adaptive liquid simulation. Our primary contributions are a strategy for dynamically deforming regular grids over the course of a simulation and a method for efficiently utilizing these deforming grids for liquid simulation. Prior work has shown that unstructured grids are very effective for adaptive fluid simulations. However, unstructured grids often lead to complicated implementations and a poor cache hit rate due to inconsistent memory access. Regular grids, on the other hand, provide a fast, fixed memory access pattern and straightforward implementation. Our method combines the advantages of both: we leverage the simplicity of regular grids while still achieving practical and controllable spatial adaptivity. We demonstrate that our method enables adaptive simulations that are fast, flexible, and robust to null-space issues. At the same time, our method is simple to implement and takes advantage of existing highly-tuned algorithms.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce dynamically warping grids for adaptive liquid simulation. Our primary contributions are a strategy for dynamically deforming regular grids over the course of a simulation and a method for efficiently utilizing these deforming grids for liquid simulation. Prior work has shown that unstructured grids are very effective for adaptive fluid simulations. However, unstructured grids often lead to complicated implementations and a poor cache hit rate due to inconsistent memory access. 
Regular grids, on the other hand, provide a fast, fixed memory access pattern and straightforward implementation. Our method combines the advantages of both: we leverage the simplicity of regular grids while still achieving practical and controllable spatial adaptivity. We demonstrate that our method enables adaptive simulations that are fast, flexible, and robust to null-space issues. At the same time, our method is simple to implement and takes advantage of existing highly-tuned algorithms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce dynamically warping grids for adaptive liquid simulation. Our primary contributions are a strategy for dynamically deforming regular grids over the course of a simulation and a method for efficiently utilizing these deforming grids for liquid simulation. Prior work has shown that unstructured grids are very effective for adaptive fluid simulations. However, unstructured grids often lead to complicated implementations and a poor cache hit rate due to inconsistent memory access. Regular grids, on the other hand, provide a fast, fixed memory access pattern and straightforward implementation. Our method combines the advantages of both: we leverage the simplicity of regular grids while still achieving practical and controllable spatial adaptivity. We demonstrate that our method enables adaptive simulations that are fast, flexible, and robust to null-space issues. 
At the same time, our method is simple to implement and takes advantage of existing highly-tuned algorithms.", "title": "Simulating Liquids on Dynamically Warping Grids", "normalizedTitle": "Simulating Liquids on Dynamically Warping Grids", "fno": "08554186", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cache Storage", "Deformation", "Flow Simulation", "Mesh Generation", "Deforming Grids", "Adaptive Liquid Simulation", "Dynamically Warping Grids", "Controllable Spatial Adaptivity", "Fixed Memory Access Pattern", "Regular Grids", "Inconsistent Memory Access", "Adaptive Fluid Simulations", "Unstructured Grids", "Adaptation Models", "Strain", "Liquids", "Computational Modeling", "Streaming Media", "Computer Graphics", "Animation", "Computer Graphics", "Physics Based Animation", "Fluid Simulation", "Liquid", "Adaptivity", "Curvilinear Grids" ], "authors": [ { "givenName": "Hikaru", "surname": "Ibayashi", "fullName": "Hikaru Ibayashi", "affiliation": "Department of Computer Science, University of Southern California, Los Angels, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Chris", "surname": "Wojtan", "fullName": "Chris Wojtan", "affiliation": "Visual Computing Group, Institute of Science and Technology Austria, Klosterneuburg, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Nils", "surname": "Thuerey", "fullName": "Nils Thuerey", "affiliation": "Technische Universität München, München, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Takeo", "surname": "Igarashi", "fullName": "Takeo Igarashi", "affiliation": "Department of Computer Science, University of Tokyo, Hongo, Bunkyo-ku, Tokyo, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Ryoichi", "surname": "Ando", "fullName": "Ryoichi Ando", "affiliation": "National Institute of Informatics, Chiyoda-ku, Tokyo, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, 
"issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2288-2302", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ldav/2017/0617/0/08231854", "title": "Parallel multi-layer ghost cell generation for distributed unstructured grids", "doi": null, "abstractUrl": "/proceedings-article/ldav/2017/08231854/12OmNAKcNJN", "parentPublication": { "id": "proceedings/ldav/2017/0617/0", "title": "2017 IEEE 7th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1995/7187/0/71870248", "title": "Vector Plots for Irregular Grids", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1995/71870248/12OmNC2OSMK", "parentPublication": { "id": "proceedings/ieee-vis/1995/7187/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1994/6627/0/00346313", "title": "Visualizing flow over curvilinear grid surfaces using line integral convolution", "doi": null, "abstractUrl": "/proceedings-article/visual/1994/00346313/12OmNyYDDGc", "parentPublication": { "id": "proceedings/visual/1994/6627/0", "title": "Proceedings Visualization '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2011/4648/0/4648a148", "title": "Fluid Animation on Arbitrarily-Shaped Structured Grids", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2011/4648a148/12OmNzb7Zrb", "parentPublication": { "id": "proceedings/sbgames/2011/4648/0", "title": "2011 Brazilian Symposium on Games and Digital Entertainment", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/10/06747389", "title": "Large-Scale Liquid Simulation on Adaptive Hexahedral Grids", "doi": 
null, "abstractUrl": "/journal/tg/2014/10/06747389/13rRUxYrbMj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3801", "title": "Image Based Reconstruction of Liquids from 2D Surface Detections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3801/1H0LsB06x7q", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08554159", "articleId": "17D45WB0qbp", "__typename": "AdjacentArticleType" }, "next": { "fno": "08543848", "articleId": "17D45VsBU70", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45VsBU70", "doi": "10.1109/TVCG.2018.2883314", "abstract": "This work proposes a new stereo shading architecture that enables adaptive shading rates and automatic shading reuse among triangles and between two views. The proposed pipeline presents several novel features. First, the present sort-middle/bin shading is extended to tile pair-based shading to rasterize and shade pixels at two views simultaneously. A new rasterization algorithm utilizing epipolar geometry is then proposed to schedule tile pairs and perform rasterization at stereo views efficiently. Second, this work presents an adaptive multi-rate shading framework to compute shading on pixels at different rates. A novel tile-based screen space cache and a new cache reuse shader are proposed to perform such multi-rate shading across triangles and views. The results show that the newly proposed method outperforms the standard sort-middle shading and the state-of-the-art multi-rate shading by achieving considerably lower shading cost and memory bandwidth.", "abstracts": [ { "abstractType": "Regular", "content": "This work proposes a new stereo shading architecture that enables adaptive shading rates and automatic shading reuse among triangles and between two views. The proposed pipeline presents several novel features. First, the present sort-middle/bin shading is extended to tile pair-based shading to rasterize and shade pixels at two views simultaneously. A new rasterization algorithm utilizing epipolar geometry is then proposed to schedule tile pairs and perform rasterization at stereo views efficiently. 
Second, this work presents an adaptive multi-rate shading framework to compute shading on pixels at different rates. A novel tile-based screen space cache and a new cache reuse shader are proposed to perform such multi-rate shading across triangles and views. The results show that the newly proposed method outperforms the standard sort-middle shading and the state-of-the-art multi-rate shading by achieving considerably lower shading cost and memory bandwidth.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This work proposes a new stereo shading architecture that enables adaptive shading rates and automatic shading reuse among triangles and between two views. The proposed pipeline presents several novel features. First, the present sort-middle/bin shading is extended to tile pair-based shading to rasterize and shade pixels at two views simultaneously. A new rasterization algorithm utilizing epipolar geometry is then proposed to schedule tile pairs and perform rasterization at stereo views efficiently. Second, this work presents an adaptive multi-rate shading framework to compute shading on pixels at different rates. A novel tile-based screen space cache and a new cache reuse shader are proposed to perform such multi-rate shading across triangles and views. 
The results show that the newly proposed method outperforms the standard sort-middle shading and the state-of-the-art multi-rate shading by achieving considerably lower shading cost and memory bandwidth.", "title": "Tile Pair-Based Adaptive Multi-Rate Stereo Shading", "normalizedTitle": "Tile Pair-Based Adaptive Multi-Rate Stereo Shading", "fno": "08543848", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Geometry", "Pipeline Processing", "Rendering Computer Graphics", "Sorting", "Stereo Image Processing", "Shading Cost", "Tile Pair Based Adaptive Multirate Stereo Shading", "Memory Bandwidth", "Standard Sort Middle Shading", "Tile Based Screen Space Cache", "Stereo Views", "Tile Pairs", "Rasterization Algorithm", "Automatic Shading Reuse", "Adaptive Shading Rates", "Stereo Shading Architecture", "Rendering Computer Graphics", "Geometry", "Pipelines", "Bandwidth", "Computer Architecture", "Signal Resolution", "Stereo Rendering", "Tile Pair Based Rendering", "Multi Rate Shading" ], "authors": [ { "givenName": "Yazhen", "surname": "Yuan", "fullName": "Yazhen Yuan", "affiliation": "State key laboratory of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Rui", "surname": "Wang", "fullName": "Rui Wang", "affiliation": "State key laboratory of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hujun", "surname": "Bao", "fullName": "Hujun Bao", "affiliation": "State key laboratory of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2303-2314", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/hcs/2015/8885/0/07477462", "title": "The ARM® 
Mali-T880 Mobile GPU", "doi": null, "abstractUrl": "/proceedings-article/hcs/2015/07477462/12OmNAS9zPX", "parentPublication": { "id": "proceedings/hcs/2015/8885/0", "title": "2015 IEEE Hot Chips 27 Symposium (HCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/1996/7518/0/75180038", "title": "Improved Specular Highlights With Adaptive Shading", "doi": null, "abstractUrl": "/proceedings-article/cgi/1996/75180038/12OmNwBT1ig", "parentPublication": { "id": "proceedings/cgi/1996/7518/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmpcon/1992/2655/0/00186697", "title": "Scalable graphics enhancements for PA-RISC workstations", "doi": null, "abstractUrl": "/proceedings-article/cmpcon/1992/00186697/12OmNxGSm2u", "parentPublication": { "id": "proceedings/cmpcon/1992/2655/0", "title": "COMPCON Spring 1992", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c030", "title": "Registering Images to Untextured Geometry Using Average Shading Gradients", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c030/12OmNyLiuzk", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/searis/2014/9955/0/07152799", "title": "guacamole - An extensible scene graph and rendering framework based on deferred shading", "doi": null, "abstractUrl": "/proceedings-article/searis/2014/07152799/12OmNzA6GLj", "parentPublication": { "id": "proceedings/searis/2014/9955/0", "title": "2014 IEEE 7th Workshop on Software Engineering and Architectures for Realtime Interactive Systems (SEARIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2016/01/07194844", "title": "Anisotropic Ambient Volume Shading", "doi": null, "abstractUrl": "/journal/tg/2016/01/07194844/13rRUB7a1fT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/03/07452621", "title": "Shape Estimation from Shading, Defocus, and Correspondence Using Light-Field Angular Coherence", "doi": null, "abstractUrl": "/journal/tp/2017/03/07452621/13rRUxYIN5A", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/10/07501796", "title": "Aggregate G-Buffer Anti-Aliasing -Extended Version-", "doi": null, "abstractUrl": "/journal/tg/2016/10/07501796/13rRUyv53Fw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpca/2019/1444/0/144400a623", "title": "Rendering Elimination: Early Discard of Redundant Tiles in the Graphics Pipeline", "doi": null, "abstractUrl": "/proceedings-article/hpca/2019/144400a623/18M7PSwaQkE", "parentPublication": { "id": "proceedings/hpca/2019/1444/0", "title": "2019 IEEE International Symposium on High Performance Computer Architecture (HPCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600g177", "title": "Multi-View Mesh Reconstruction with Neural Deferred Shading", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600g177/1H0NScvhUC4", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
} ], "adjacentArticles": { "previous": { "fno": "08554186", "articleId": "17D45WIXbPb", "__typename": "AdjacentArticleType" }, "next": { "fno": "08580399", "articleId": "17D45VUZMU0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1js2HXZuFJS", "name": "ttg202006-08543848s1-supplemental_video.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08543848s1-supplemental_video.mp4", "extension": "mp4", "size": "96.1 MB", "__typename": "WebExtraType" }, { "id": "1js2MuR0GjK", "name": "ttg202006-08543848s1-supplemental_document.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08543848s1-supplemental_document.pdf", "extension": "pdf", "size": "52.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45VUZMU0", "doi": "10.1109/TVCG.2018.2887379", "abstract": "Virtual reality (VR) has enjoyed significant popularity in recent years. Where navigation has been a fundamental appeal of 3D applications for decades, facilitating this in VR has been quite a challenge. Over the past decades, various virtual locomotion techniques (VLTs) have been developed that aim to offer natural, usable and efficient ways of navigating VR without inducing VR sickness. Several studies of these techniques have been conducted in order to evaluate their performance in various study conditions and virtual contexts. Taxonomies have also been proposed to either place similar techniques in meaningful categories or decompose them to their underlying design components. In this survey, we aim to aggregate and understand the current state of the art of VR locomotion research and discuss the design implications of VLTs in terms of strengths, weaknesses and applicability.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual reality (VR) has enjoyed significant popularity in recent years. Where navigation has been a fundamental appeal of 3D applications for decades, facilitating this in VR has been quite a challenge. Over the past decades, various virtual locomotion techniques (VLTs) have been developed that aim to offer natural, usable and efficient ways of navigating VR without inducing VR sickness. Several studies of these techniques have been conducted in order to evaluate their performance in various study conditions and virtual contexts. 
Taxonomies have also been proposed to either place similar techniques in meaningful categories or decompose them to their underlying design components. In this survey, we aim to aggregate and understand the current state of the art of VR locomotion research and discuss the design implications of VLTs in terms of strengths, weaknesses and applicability.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual reality (VR) has enjoyed significant popularity in recent years. Where navigation has been a fundamental appeal of 3D applications for decades, facilitating this in VR has been quite a challenge. Over the past decades, various virtual locomotion techniques (VLTs) have been developed that aim to offer natural, usable and efficient ways of navigating VR without inducing VR sickness. Several studies of these techniques have been conducted in order to evaluate their performance in various study conditions and virtual contexts. Taxonomies have also been proposed to either place similar techniques in meaningful categories or decompose them to their underlying design components. 
In this survey, we aim to aggregate and understand the current state of the art of VR locomotion research and discuss the design implications of VLTs in terms of strengths, weaknesses and applicability.", "title": "Virtual Locomotion: A Survey", "normalizedTitle": "Virtual Locomotion: A Survey", "fno": "08580399", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Virtual Reality", "Navigation", "Virtual Locomotion Techniques", "VLT", "VR Sickness", "VR Locomotion", "Navigation", "Legged Locomotion", "Task Analysis", "Monitoring", "Visualization", "Three Dimensional Displays", "Space Exploration", "Virtual Reality", "Virtual Locomotion", "Virtual Navigation", "Survey", "Taxonomy" ], "authors": [ { "givenName": "Majed", "surname": "Al Zayer", "fullName": "Majed Al Zayer", "affiliation": "University of Nevada, Reno, NV, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Paul", "surname": "MacNeilage", "fullName": "Paul MacNeilage", "affiliation": "University of Nevada, Reno, NV, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Eelke", "surname": "Folmer", "fullName": "Eelke Folmer", "affiliation": "University of Nevada, Reno, NV, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2315-2334", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892348", "title": "Steering locomotion by vestibular perturbation in room-scale VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2012/1204/0/06184180", "title": "From virtual to 
actual mobility: Assessing the benefits of active locomotion through an immersive virtual environment using a motorized wheelchair", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184180/12OmNxdDFLw", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446130", "title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/07/07946183", "title": "Walking with Virtual People: Evaluation of Locomotion Interfaces in Dynamic Environments", "doi": null, "abstractUrl": "/journal/tg/2018/07/07946183/13rRUEgs2C2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2018/7123/0/08493432", "title": "LUTE: A Locomotion Usability Test Environmentfor Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2018/08493432/14tNJmUlJD4", "parentPublication": { "id": "proceedings/vs-games/2018/7123/0", "title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714054", "title": "Remote research on locomotion interfaces for virtual reality: Replication of a lab-based study on teleporting interfaces", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714054/1B0XZAXWaIg", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09744001", "title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a696", "title": "Seamless-walk: Novel Natural Virtual Reality Locomotion Method with a High-Resolution Tactile Sensor", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a696/1CJeXaYYtd6", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09761724", "title": "Effects of Transfer Functions and Body Parts on Body-centric Locomotion in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/5555/01/09761724/1CKMkLCKOSk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a452", "title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": 
"08543848", "articleId": "17D45VsBU70", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwgQpDb", "doi": "10.1109/2945.675647", "abstract": "Abstract—Camera calibration and the acquisition of Euclidean 3D measurements have so far been considered necessary requirements for overlaying three-dimensional graphical objects with live video. In this article, we describe a new approach to video-based augmented reality that avoids both requirements: It does not use any metric information about the calibration parameters of the camera or the 3D locations and dimensions of the environment's objects. The only requirement is the ability to track across frames at least four fiducial points that are specified by the user during system initialization and whose world coordinates are unknown.Our approach is based on the following observation: Given a set of four or more noncoplanar 3D points, the projection of all points in the set can be computed as a linear combination of the projections of just four of the points. We exploit this observation by 1) tracking regions and color fiducial points at frame rate, and 2) representing virtual objects in a non-Euclidean, affine frame of reference that allows their projection to be computed as a linear combination of the projection of the fiducial points. 
Experimental results on two augmented reality systems, one monitor-based and one head-mounted, demonstrate that the approach is readily implementable, imposes minimal computational and hardware requirements, and generates real-time and accurate video overlays even when the camera parameters vary dynamically.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—Camera calibration and the acquisition of Euclidean 3D measurements have so far been considered necessary requirements for overlaying three-dimensional graphical objects with live video. In this article, we describe a new approach to video-based augmented reality that avoids both requirements: It does not use any metric information about the calibration parameters of the camera or the 3D locations and dimensions of the environment's objects. The only requirement is the ability to track across frames at least four fiducial points that are specified by the user during system initialization and whose world coordinates are unknown.Our approach is based on the following observation: Given a set of four or more noncoplanar 3D points, the projection of all points in the set can be computed as a linear combination of the projections of just four of the points. We exploit this observation by 1) tracking regions and color fiducial points at frame rate, and 2) representing virtual objects in a non-Euclidean, affine frame of reference that allows their projection to be computed as a linear combination of the projection of the fiducial points. 
Experimental results on two augmented reality systems, one monitor-based and one head-mounted, demonstrate that the approach is readily implementable, imposes minimal computational and hardware requirements, and generates real-time and accurate video overlays even when the camera parameters vary dynamically.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—Camera calibration and the acquisition of Euclidean 3D measurements have so far been considered necessary requirements for overlaying three-dimensional graphical objects with live video. In this article, we describe a new approach to video-based augmented reality that avoids both requirements: It does not use any metric information about the calibration parameters of the camera or the 3D locations and dimensions of the environment's objects. The only requirement is the ability to track across frames at least four fiducial points that are specified by the user during system initialization and whose world coordinates are unknown.Our approach is based on the following observation: Given a set of four or more noncoplanar 3D points, the projection of all points in the set can be computed as a linear combination of the projections of just four of the points. We exploit this observation by 1) tracking regions and color fiducial points at frame rate, and 2) representing virtual objects in a non-Euclidean, affine frame of reference that allows their projection to be computed as a linear combination of the projection of the fiducial points. 
Experimental results on two augmented reality systems, one monitor-based and one head-mounted, demonstrate that the approach is readily implementable, imposes minimal computational and hardware requirements, and generates real-time and accurate video overlays even when the camera parameters vary dynamically.", "title": "Calibration-Free Augmented Reality", "normalizedTitle": "Calibration-Free Augmented Reality", "fno": "v0001", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Real Time Computer Vision", "Calibration", "Registration", "Affine Representations", "Feature Tracking", "3 D Interaction Techniques" ], "authors": [ { "givenName": "Kiriakos N.", "surname": "Kutulakos", "fullName": "Kiriakos N. Kutulakos", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "James R.", "surname": "Vallino", "fullName": "James R. Vallino", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "1-20", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": null, "next": { "fno": "v0021", "articleId": "13rRUNvgyW9", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvgyW9", "doi": "10.1109/2945.675649", "abstract": "Abstract—Collision detection is of paramount importance for many applications in computer graphics and visualization. Typically, the input to a collision detection algorithm is a large number of geometric objects comprising an environment, together with a set of objects moving within the environment. In addition to determining accurately the contacts that occur between pairs of objects, one needs also to do so at real-time rates. Applications such as haptic force-feedback can require over 1,000 collision queries per second.In this paper, we develop and analyze a method, based on bounding-volume hierarchies, for efficient collision detection for objects moving within highly complex environments. Our choice of bounding volume is to use a \"discrete orientation polytope\" (\"k-dop\"), a convex polytope whose facets are determined by halfspaces whose outward normals come from a small fixed set of k orientations. We compare a variety of methods for constructing hierarchies (\"BV-trees\") of bounding k-dops. Further, we propose algorithms for maintaining an effective BV-tree of k-dops for moving objects, as they rotate, and for performing fast collision detection using BV-trees of the moving objects and of the environment.Our algorithms have been implemented and tested. We provide experimental evidence showing that our approach yields substantially faster collision detection than previous methods.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—Collision detection is of paramount importance for many applications in computer graphics and visualization. 
Typically, the input to a collision detection algorithm is a large number of geometric objects comprising an environment, together with a set of objects moving within the environment. In addition to determining accurately the contacts that occur between pairs of objects, one needs also to do so at real-time rates. Applications such as haptic force-feedback can require over 1,000 collision queries per second.In this paper, we develop and analyze a method, based on bounding-volume hierarchies, for efficient collision detection for objects moving within highly complex environments. Our choice of bounding volume is to use a \"discrete orientation polytope\" (\"k-dop\"), a convex polytope whose facets are determined by halfspaces whose outward normals come from a small fixed set of k orientations. We compare a variety of methods for constructing hierarchies (\"BV-trees\") of bounding k-dops. Further, we propose algorithms for maintaining an effective BV-tree of k-dops for moving objects, as they rotate, and for performing fast collision detection using BV-trees of the moving objects and of the environment.Our algorithms have been implemented and tested. We provide experimental evidence showing that our approach yields substantially faster collision detection than previous methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—Collision detection is of paramount importance for many applications in computer graphics and visualization. Typically, the input to a collision detection algorithm is a large number of geometric objects comprising an environment, together with a set of objects moving within the environment. In addition to determining accurately the contacts that occur between pairs of objects, one needs also to do so at real-time rates. 
Applications such as haptic force-feedback can require over 1,000 collision queries per second.In this paper, we develop and analyze a method, based on bounding-volume hierarchies, for efficient collision detection for objects moving within highly complex environments. Our choice of bounding volume is to use a \"discrete orientation polytope\" (\"k-dop\"), a convex polytope whose facets are determined by halfspaces whose outward normals come from a small fixed set of k orientations. We compare a variety of methods for constructing hierarchies (\"BV-trees\") of bounding k-dops. Further, we propose algorithms for maintaining an effective BV-tree of k-dops for moving objects, as they rotate, and for performing fast collision detection using BV-trees of the moving objects and of the environment.Our algorithms have been implemented and tested. We provide experimental evidence showing that our approach yields substantially faster collision detection than previous methods.", "title": "Efficient Collision Detection Using Bounding Volume Hierarchies of k-DOPs", "normalizedTitle": "Efficient Collision Detection Using Bounding Volume Hierarchies of k-DOPs", "fno": "v0021", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Collision Detection", "Intersection Searching", "Bounding Volume Hierarchies", "Discrete Orientation Polytopes", "Bounding Boxes", "Virtual Reality", "Virtual Environments" ], "authors": [ { "givenName": "James T.", "surname": "Klosowski", "fullName": "James T. Klosowski", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Martin", "surname": "Held", "fullName": "Martin Held", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Joseph S.B.", "surname": "Mitchell", "fullName": "Joseph S.B. 
Mitchell", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Henry", "surname": "Sowizral", "fullName": "Henry Sowizral", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Karel", "surname": "Zikan", "fullName": "Karel Zikan", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "21-36", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0001", "articleId": "13rRUwgQpDb", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0037", "articleId": "13rRUwwaKsU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwwaKsU", "doi": "10.1109/2945.675650", "abstract": "Abstract—This paper describes a volume rendering system for unstructured data, especially finite element data, that creates images with very high accuracy. The system will currently handle meshes whose cells are either linear or quadratic tetrahedra. Compromises or approximations are not introduced for the sake of efficiency. Whenever possible, exact mathematical solutions for the radiance integrals involved and for interpolation are used. The system will also handle meshes with mixed cell types: tetrahedra, bricks, prisms, wedges, and pyramids, but not with high accuracy. Accurate semitransparent shaded isosurfaces may be embedded in the volume rendering. For very small cells, subpixel accumulation by splatting is used to avoid sampling error. A revision to an existing accurate visibility ordering algorithm is described, which includes a correction and a method for dramatically increasing its efficiency. Finally, hardware assisted projection and compositing are extended from tetrahedra to arbitrary convex polyhedra.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—This paper describes a volume rendering system for unstructured data, especially finite element data, that creates images with very high accuracy. The system will currently handle meshes whose cells are either linear or quadratic tetrahedra. Compromises or approximations are not introduced for the sake of efficiency. Whenever possible, exact mathematical solutions for the radiance integrals involved and for interpolation are used. 
The system will also handle meshes with mixed cell types: tetrahedra, bricks, prisms, wedges, and pyramids, but not with high accuracy. Accurate semitransparent shaded isosurfaces may be embedded in the volume rendering. For very small cells, subpixel accumulation by splatting is used to avoid sampling error. A revision to an existing accurate visibility ordering algorithm is described, which includes a correction and a method for dramatically increasing its efficiency. Finally, hardware assisted projection and compositing are extended from tetrahedra to arbitrary convex polyhedra.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—This paper describes a volume rendering system for unstructured data, especially finite element data, that creates images with very high accuracy. The system will currently handle meshes whose cells are either linear or quadratic tetrahedra. Compromises or approximations are not introduced for the sake of efficiency. Whenever possible, exact mathematical solutions for the radiance integrals involved and for interpolation are used. The system will also handle meshes with mixed cell types: tetrahedra, bricks, prisms, wedges, and pyramids, but not with high accuracy. Accurate semitransparent shaded isosurfaces may be embedded in the volume rendering. For very small cells, subpixel accumulation by splatting is used to avoid sampling error. A revision to an existing accurate visibility ordering algorithm is described, which includes a correction and a method for dramatically increasing its efficiency. 
Finally, hardware assisted projection and compositing are extended from tetrahedra to arbitrary convex polyhedra.", "title": "A High Accuracy Volume Renderer for Unstructured Data", "normalizedTitle": "A High Accuracy Volume Renderer for Unstructured Data", "fno": "v0037", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Volume Rendering", "Unstructured Meshes", "High Accuracy", "Finite Element Method", "Isosurfaces", "Splatting", "Cell Projection", "Visibility Ordering", "Depth Sorting" ], "authors": [ { "givenName": "Peter L.", "surname": "Williams", "fullName": "Peter L. Williams", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Nelson L.", "surname": "Max", "fullName": "Nelson L. Max", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Clifford M.", "surname": "Stein", "fullName": "Clifford M. Stein", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "37-54", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0021", "articleId": "13rRUNvgyW9", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0055", "articleId": "13rRUxly95q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxly95q", "doi": "10.1109/2945.675652", "abstract": "Abstract—Complex repetitive scenes containing forests, foliage, grass, hair, or fur, are challenging for common modeling and rendering tools. The amount of data, the tediousness of modeling and animation tasks, and the cost of realistic rendering have caused such kind of scene to see only limited use even in high-end productions. We describe here how the use of volumetric textures is well suited to such scenes. These primitives can greatly simplify modeling and animation tasks. More importantly, they can be very efficiently rendered using ray tracing with few aliasing artifacts. The main idea, initially introduced by Kajiya and Kay [9], is to represent a pattern of 3D geometry in a reference volume, that is tiled over an underlying surface much like a regular 2D texture. In our contribution, the mapping is independent of the mesh subdivision, the pattern can contain any kind of shape, and it is prefiltered at different scales as for MIP-mapping. Although the model encoding is volumetric, the rendering method differs greatly from traditional volume rendering: A volumetric texture only exists in the neighborhood of a surface, and the repeated instances (called texels) of the reference volume are spatially deformed. Furthermore, each voxel of the reference volume contains a key feature which controls the reflectance function that represents aggregate intravoxel geometry. This allows for ray-tracing of highly complex scenes with very few aliasing artifacts, using a single ray per pixel (for the part of the scene using the volumetric texture representation). 
The major technical considerations of our method lie in the ray-path determination and in the specification of the reflectance function.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—Complex repetitive scenes containing forests, foliage, grass, hair, or fur, are challenging for common modeling and rendering tools. The amount of data, the tediousness of modeling and animation tasks, and the cost of realistic rendering have caused such kind of scene to see only limited use even in high-end productions. We describe here how the use of volumetric textures is well suited to such scenes. These primitives can greatly simplify modeling and animation tasks. More importantly, they can be very efficiently rendered using ray tracing with few aliasing artifacts. The main idea, initially introduced by Kajiya and Kay [9], is to represent a pattern of 3D geometry in a reference volume, that is tiled over an underlying surface much like a regular 2D texture. In our contribution, the mapping is independent of the mesh subdivision, the pattern can contain any kind of shape, and it is prefiltered at different scales as for MIP-mapping. Although the model encoding is volumetric, the rendering method differs greatly from traditional volume rendering: A volumetric texture only exists in the neighborhood of a surface, and the repeated instances (called texels) of the reference volume are spatially deformed. Furthermore, each voxel of the reference volume contains a key feature which controls the reflectance function that represents aggregate intravoxel geometry. This allows for ray-tracing of highly complex scenes with very few aliasing artifacts, using a single ray per pixel (for the part of the scene using the volumetric texture representation). 
The major technical considerations of our method lie in the ray-path determination and in the specification of the reflectance function.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—Complex repetitive scenes containing forests, foliage, grass, hair, or fur, are challenging for common modeling and rendering tools. The amount of data, the tediousness of modeling and animation tasks, and the cost of realistic rendering have caused such kind of scene to see only limited use even in high-end productions. We describe here how the use of volumetric textures is well suited to such scenes. These primitives can greatly simplify modeling and animation tasks. More importantly, they can be very efficiently rendered using ray tracing with few aliasing artifacts. The main idea, initially introduced by Kajiya and Kay [9], is to represent a pattern of 3D geometry in a reference volume, that is tiled over an underlying surface much like a regular 2D texture. In our contribution, the mapping is independent of the mesh subdivision, the pattern can contain any kind of shape, and it is prefiltered at different scales as for MIP-mapping. Although the model encoding is volumetric, the rendering method differs greatly from traditional volume rendering: A volumetric texture only exists in the neighborhood of a surface, and the repeated instances (called texels) of the reference volume are spatially deformed. Furthermore, each voxel of the reference volume contains a key feature which controls the reflectance function that represents aggregate intravoxel geometry. This allows for ray-tracing of highly complex scenes with very few aliasing artifacts, using a single ray per pixel (for the part of the scene using the volumetric texture representation). 
The major technical considerations of our method lie in the ray-path determination and in the specification of the reflectance function.", "title": "Modeling, Animating, and Rendering Complex Scenes Using Volumetric Textures", "normalizedTitle": "Modeling, Animating, and Rendering Complex Scenes Using Volumetric Textures", "fno": "v0055", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Volumetric Textures", "Complex Geometry", "Levels Of Detail" ], "authors": [ { "givenName": "Fabrice", "surname": "Neyret", "fullName": "Fabrice Neyret", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "55-70", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0037", "articleId": "13rRUwwaKsU", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0071", "articleId": "13rRUB7a1fF", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUB7a1fF", "doi": "10.1109/2945.675655", "abstract": "Abstract—A technique is presented for line art rendering of scenes composed of freeform surfaces. The line art that is created for parametric surfaces is practically intrinsic and is globally invariant to changes in the surface parameterization. This method is equally applicable for line art rendering of implicit forms, creating a unified line art rendering method for both parametric and implicit forms. This added flexibility exposes a new horizon of special, parameterization independent, line art effects. Moreover, the production of the line art illustrations can be combined with traditional rendering techniques such as transparency and texture mapping. Examples that demonstrate the capabilities of the proposed approach are presented for both the parametric and implicit forms.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—A technique is presented for line art rendering of scenes composed of freeform surfaces. The line art that is created for parametric surfaces is practically intrinsic and is globally invariant to changes in the surface parameterization. This method is equally applicable for line art rendering of implicit forms, creating a unified line art rendering method for both parametric and implicit forms. This added flexibility exposes a new horizon of special, parameterization independent, line art effects. Moreover, the production of the line art illustrations can be combined with traditional rendering techniques such as transparency and texture mapping. 
Examples that demonstrate the capabilities of the proposed approach are presented for both the parametric and implicit forms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—A technique is presented for line art rendering of scenes composed of freeform surfaces. The line art that is created for parametric surfaces is practically intrinsic and is globally invariant to changes in the surface parameterization. This method is equally applicable for line art rendering of implicit forms, creating a unified line art rendering method for both parametric and implicit forms. This added flexibility exposes a new horizon of special, parameterization independent, line art effects. Moreover, the production of the line art illustrations can be combined with traditional rendering techniques such as transparency and texture mapping. Examples that demonstrate the capabilities of the proposed approach are presented for both the parametric and implicit forms.", "title": "Line Art Illustrations of Parametric and Implicit Forms", "normalizedTitle": "Line Art Illustrations of Parametric and Implicit Forms", "fno": "v0071", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Sketches", "Illustrations", "Line Drawings", "Freeform Surfaces", "NUR Bs", "Implicit Forms", "Surface Coverage", "Printing" ], "authors": [ { "givenName": "Gershon", "surname": "Elber", "fullName": "Gershon Elber", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "71-81", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0055", "articleId": "13rRUxly95q", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0082", "articleId": "13rRUx0geuY", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0geuY", "doi": "10.1109/2945.675656", "abstract": "Abstract—A terrain is most often represented with a digital elevation map consisting of a set of sample points from the terrain surface. This paper presents a fast and practical algorithm to compute the horizon, or skyline, at all sample points of a terrain. The horizons are useful in a number of applications, including the rendering of self-shadowing displacement maps, visibility culling for faster flight simulation, and rendering of cartographic data. Experimental and theoretical results are presented which show that the algorithm is more accurate that previous algorithms and is faster than previous algorithms in terrains of more than 100,000 sample points.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—A terrain is most often represented with a digital elevation map consisting of a set of sample points from the terrain surface. This paper presents a fast and practical algorithm to compute the horizon, or skyline, at all sample points of a terrain. The horizons are useful in a number of applications, including the rendering of self-shadowing displacement maps, visibility culling for faster flight simulation, and rendering of cartographic data. Experimental and theoretical results are presented which show that the algorithm is more accurate that previous algorithms and is faster than previous algorithms in terrains of more than 100,000 sample points.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—A terrain is most often represented with a digital elevation map consisting of a set of sample points from the terrain surface. 
This paper presents a fast and practical algorithm to compute the horizon, or skyline, at all sample points of a terrain. The horizons are useful in a number of applications, including the rendering of self-shadowing displacement maps, visibility culling for faster flight simulation, and rendering of cartographic data. Experimental and theoretical results are presented which show that the algorithm is more accurate that previous algorithms and is faster than previous algorithms in terrains of more than 100,000 sample points.", "title": "Fast Horizon Computation at All Points of a Terrain With Visibility and Shading Applications", "normalizedTitle": "Fast Horizon Computation at All Points of a Terrain With Visibility and Shading Applications", "fno": "v0082", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Terrain", "Digital Elevation Map", "Horizon", "Skyline", "Visibility", "Shadows", "Rendering", "GIS" ], "authors": [ { "givenName": "A. James", "surname": "Stewart", "fullName": "A. James Stewart", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "82-93", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0071", "articleId": "13rRUB7a1fF", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwgyOjk", "doi": "10.1109/TVCG.2013.63", "abstract": "Poisson disk sampling has excellent spatial and spectral properties, and plays an important role in a variety of visual computing. Although many promising algorithms have been proposed for multidimensional sampling in euclidean space, very few studies have been reported with regard to the problem of generating Poisson disks on surfaces due to the complicated nature of the surface. This paper presents an intrinsic algorithm for parallel Poisson disk sampling on arbitrary surfaces. In sharp contrast to the conventional parallel approaches, our method neither partitions the given surface into small patches nor uses any spatial data structure to maintain the voids in the sampling domain. Instead, our approach assigns each sample candidate a random and unique priority that is unbiased with regard to the distribution. Hence, multiple threads can process the candidates simultaneously and resolve conflicts by checking the given priority values. Our algorithm guarantees that the generated Poisson disks are uniformly and randomly distributed without bias. It is worth noting that our method is intrinsic and independent of the embedding space. This intrinsic feature allows us to generate Poisson disk patterns on arbitrary surfaces in IRn. To our knowledge, this is the first intrinsic, parallel, and accurate algorithm for surface Poisson disk sampling. 
Furthermore, by manipulating the spatially varying density function, we can obtain adaptive sampling easily.", "abstracts": [ { "abstractType": "Regular", "content": "Poisson disk sampling has excellent spatial and spectral properties, and plays an important role in a variety of visual computing. Although many promising algorithms have been proposed for multidimensional sampling in euclidean space, very few studies have been reported with regard to the problem of generating Poisson disks on surfaces due to the complicated nature of the surface. This paper presents an intrinsic algorithm for parallel Poisson disk sampling on arbitrary surfaces. In sharp contrast to the conventional parallel approaches, our method neither partitions the given surface into small patches nor uses any spatial data structure to maintain the voids in the sampling domain. Instead, our approach assigns each sample candidate a random and unique priority that is unbiased with regard to the distribution. Hence, multiple threads can process the candidates simultaneously and resolve conflicts by checking the given priority values. Our algorithm guarantees that the generated Poisson disks are uniformly and randomly distributed without bias. It is worth noting that our method is intrinsic and independent of the embedding space. This intrinsic feature allows us to generate Poisson disk patterns on arbitrary surfaces in IRn. To our knowledge, this is the first intrinsic, parallel, and accurate algorithm for surface Poisson disk sampling. Furthermore, by manipulating the spatially varying density function, we can obtain adaptive sampling easily.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Poisson disk sampling has excellent spatial and spectral properties, and plays an important role in a variety of visual computing. 
Although many promising algorithms have been proposed for multidimensional sampling in euclidean space, very few studies have been reported with regard to the problem of generating Poisson disks on surfaces due to the complicated nature of the surface. This paper presents an intrinsic algorithm for parallel Poisson disk sampling on arbitrary surfaces. In sharp contrast to the conventional parallel approaches, our method neither partitions the given surface into small patches nor uses any spatial data structure to maintain the voids in the sampling domain. Instead, our approach assigns each sample candidate a random and unique priority that is unbiased with regard to the distribution. Hence, multiple threads can process the candidates simultaneously and resolve conflicts by checking the given priority values. Our algorithm guarantees that the generated Poisson disks are uniformly and randomly distributed without bias. It is worth noting that our method is intrinsic and independent of the embedding space. This intrinsic feature allows us to generate Poisson disk patterns on arbitrary surfaces in IRn. To our knowledge, this is the first intrinsic, parallel, and accurate algorithm for surface Poisson disk sampling. Furthermore, by manipulating the spatially varying density function, we can obtain adaptive sampling easily.", "title": "An Intrinsic Algorithm for Parallel Poisson Disk Sampling on Arbitrary Surfaces", "normalizedTitle": "An Intrinsic Algorithm for Parallel Poisson Disk Sampling on Arbitrary Surfaces", "fno": "ttg2013091425", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Instruction Sets", "Partitioning Algorithms", "Data Structures", "Approximation Algorithms", "Algorithm Design And Analysis", "Spatial Databases", "Geodesic Distance", "Parallel Poisson Disk Sampling", "Intrinsic Algorithm", "Unbiased Sampling", "GPU" ], "authors": [ { "givenName": null, "surname": "Xiang Ying", "fullName": "Xiang Ying", "affiliation": "Sch. of Comput. 
Eng., Nanyang Technol. Univ., Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Shi-Qing Xin", "fullName": "Shi-Qing Xin", "affiliation": "Sch. of Comput. Eng., Nanyang Technol. Univ., Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Qian Sun", "fullName": "Qian Sun", "affiliation": "Sch. of Comput. Eng., Nanyang Technol. Univ., Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Ying He", "fullName": "Ying He", "affiliation": "Sch. of Comput. Eng., Nanyang Technol. Univ., Singapore, Singapore", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1425-1437", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2013/4989/0/4989a233", "title": "Intrinsic Characterization of Dynamic Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2013/4989a233/12OmNx7ov3C", "parentPublication": { "id": "proceedings/cvpr/2013/4989/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2015/9403/0/9403a090", "title": "Writing Chinese Calligraphy on Arbitrary Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cw/2015/9403a090/12OmNybfr6u", "parentPublication": { "id": "proceedings/cw/2015/9403/0", "title": "2015 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isip/2010/4261/0/4261a462", "title": "The Split Bregman Method for Image Diffusion on Implicit Surfaces", "doi": null, "abstractUrl": 
"/proceedings-article/isip/2010/4261a462/12OmNyuPLp1", "parentPublication": { "id": "proceedings/isip/2010/4261/0", "title": "2010 Third International Symposium on Information Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2007/1629/0/04342600", "title": "Poisson Disk Point Sets by Hierarchical Dart Throwing", "doi": null, "abstractUrl": "/proceedings-article/rt/2007/04342600/12OmNzFMFqN", "parentPublication": { "id": "proceedings/rt/2007/1629/0", "title": "IEEE/ EG Symposium on Interactive Ray Tracing 2007", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733a690", "title": "Poisson Disk Sampling on the Grassmannnian: Applications in Subspace Optimization", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733a690/12OmNzTYC7k", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/05/ttg2008050982", "title": "Dual Poisson-Disk Tiling: An Efficient Method for Distributing Features on Arbitrary Surfaces", "doi": null, "abstractUrl": "/journal/tg/2008/05/ttg2008050982/13rRUwI5UfY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/06/07927461", "title": "Good Random Multi-Triangulation of Surfaces", "doi": null, "abstractUrl": "/journal/tg/2018/06/07927461/13rRUxly9e2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040805", "title": "Globally Optimal Surface Mapping for Surfaces with 
Arbitrary Topology", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040805/13rRUygT7su", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2018/5500/0/550000b153", "title": "Mixed Tensor Product of q-Bezier-Poisson Surfaces", "doi": null, "abstractUrl": "/proceedings-article/icisce/2018/550000b153/17D45WgziRw", "parentPublication": { "id": "proceedings/icisce/2018/5500/0", "title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/culture-and-computing/2017/1135/0/08227334", "title": "Improving Transparent Visualization of Large-Scale Laser-Scanned Point Clouds by Using Poisson Disk Sampling", "doi": null, "abstractUrl": "/proceedings-article/culture-and-computing/2017/08227334/17D45XERmmb", "parentPublication": { "id": "proceedings/culture-and-computing/2017/1135/0", "title": "2017 International Conference on Culture and Computing (Culture and Computing)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "ttg2013091438", "articleId": "13rRUxASuGk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxASuGk", "doi": "10.1109/TVCG.2013.66", "abstract": "We present Bristle Maps, a novel method for the aggregation, abstraction, and stylization of spatiotemporal data that enables multiattribute visualization, exploration, and analysis. This visualization technique supports the display of multidimensional data by providing users with a multiparameter encoding scheme within a single visual encoding paradigm. Given a set of geographically located spatiotemporal events, we approximate the data as a continuous function using kernel density estimation. The density estimation encodes the probability that an event will occur within the space over a given temporal aggregation. These probability values, for one or more set of events, are then encoded into a bristle map. A bristle map consists of a series of straight lines that extend from, and are connected to, linear map elements such as roads, train, subway lines, and so on. These lines vary in length, density, color, orientation, and transparencyâcreating the multivariate attribute encoding scheme where event magnitude, change, and uncertainty can be mapped as various bristle parameters. This approach increases the amount of information displayed in a single plot and allows for unique designs for various information schemes. We show the application of our bristle map encoding scheme using categorical spatiotemporal police reports. Our examples demonstrate the use of our technique for visualizing data magnitude, variable comparisons, and a variety of multivariate attribute combinations. 
To evaluate the effectiveness of our bristle map, we have conducted quantitative and qualitative evaluations in which we compare our bristle map to conventional geovisualization techniques. Our results show that bristle maps are competitive in completion time and accuracy of tasks with various levels of complexity.", "abstracts": [ { "abstractType": "Regular", "content": "We present Bristle Maps, a novel method for the aggregation, abstraction, and stylization of spatiotemporal data that enables multiattribute visualization, exploration, and analysis. This visualization technique supports the display of multidimensional data by providing users with a multiparameter encoding scheme within a single visual encoding paradigm. Given a set of geographically located spatiotemporal events, we approximate the data as a continuous function using kernel density estimation. The density estimation encodes the probability that an event will occur within the space over a given temporal aggregation. These probability values, for one or more set of events, are then encoded into a bristle map. A bristle map consists of a series of straight lines that extend from, and are connected to, linear map elements such as roads, train, subway lines, and so on. These lines vary in length, density, color, orientation, and transparencyâcreating the multivariate attribute encoding scheme where event magnitude, change, and uncertainty can be mapped as various bristle parameters. This approach increases the amount of information displayed in a single plot and allows for unique designs for various information schemes. We show the application of our bristle map encoding scheme using categorical spatiotemporal police reports. Our examples demonstrate the use of our technique for visualizing data magnitude, variable comparisons, and a variety of multivariate attribute combinations. 
To evaluate the effectiveness of our bristle map, we have conducted quantitative and qualitative evaluations in which we compare our bristle map to conventional geovisualization techniques. Our results show that bristle maps are competitive in completion time and accuracy of tasks with various levels of complexity.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present Bristle Maps, a novel method for the aggregation, abstraction, and stylization of spatiotemporal data that enables multiattribute visualization, exploration, and analysis. This visualization technique supports the display of multidimensional data by providing users with a multiparameter encoding scheme within a single visual encoding paradigm. Given a set of geographically located spatiotemporal events, we approximate the data as a continuous function using kernel density estimation. The density estimation encodes the probability that an event will occur within the space over a given temporal aggregation. These probability values, for one or more set of events, are then encoded into a bristle map. A bristle map consists of a series of straight lines that extend from, and are connected to, linear map elements such as roads, train, subway lines, and so on. These lines vary in length, density, color, orientation, and transparencyâcreating the multivariate attribute encoding scheme where event magnitude, change, and uncertainty can be mapped as various bristle parameters. This approach increases the amount of information displayed in a single plot and allows for unique designs for various information schemes. We show the application of our bristle map encoding scheme using categorical spatiotemporal police reports. Our examples demonstrate the use of our technique for visualizing data magnitude, variable comparisons, and a variety of multivariate attribute combinations. 
To evaluate the effectiveness of our bristle map, we have conducted quantitative and qualitative evaluations in which we compare our bristle map to conventional geovisualization techniques. Our results show that bristle maps are competitive in completion time and accuracy of tasks with various levels of complexity.", "title": "Bristle Maps: A Multivariate Abstraction Technique for Geovisualization", "normalizedTitle": "Bristle Maps: A Multivariate Abstraction Technique for Geovisualization", "fno": "ttg2013091438", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Encoding", "Image Color Analysis", "Data Visualization", "Visualization", "Equations", "Spatiotemporal Phenomena", "Kernel", "Geovisualization", "Data Transformation And Representation", "Data Abstraction", "Illustrative Visualization" ], "authors": [ { "givenName": null, "surname": "SungYe Kim", "fullName": "SungYe Kim", "affiliation": "Sch. of Electr. & Comput. Eng., Purdue Univ., West Lafayette, IN, USA", "__typename": "ArticleAuthorType" }, { "givenName": "R.", "surname": "Maciejewski", "fullName": "R. Maciejewski", "affiliation": "Sch. of Comput., Inf., & Decision Syst. Eng., Arizona State Univ., Tempe, AZ, USA", "__typename": "ArticleAuthorType" }, { "givenName": "A.", "surname": "Malik", "fullName": "A. Malik", "affiliation": "Sch. of Electr. & Comput. Eng., Purdue Univ., West Lafayette, IN, USA", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Yun Jang", "fullName": "Yun Jang", "affiliation": "Dept. of Comput. Eng., Sejong Univ., Seoul, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "D. S.", "surname": "Ebert", "fullName": "D. S. Ebert", "affiliation": "Sch. of Electr. & Comput. Eng., Purdue Univ., West Lafayette, IN, USA", "__typename": "ArticleAuthorType" }, { "givenName": "T.", "surname": "Isenberg", "fullName": "T. Isenberg", "affiliation": "INRIA-Saclay, Univ. 
Paris-Sud, Orsay, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1438-1454", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2011/935/0/05742384", "title": "Interactive visualization of multivariate trajectory data with density maps", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2011/05742384/12OmNqAU6rq", "parentPublication": { "id": "proceedings/pacificvis/2011/935/0", "title": "2011 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504789", "title": "The Amnesia Atlas VR. A photographic media interface as memory-prosthesis", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504789/12OmNzG4gxE", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2017/2715/0/08258325", "title": "Spatiotemporal visualization of traffic paths using color space time curve", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08258325/17D45XeKgni", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2017/2715/0/08257966", "title": "Visual analytics with unparalleled variety scaling for big earth data", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08257966/17D45XfSEUp", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 IEEE International 
Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600c864", "title": "Self-supervised Video Transformer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600c864/1H0NlQdTmlW", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnisc/2022/5351/0/535100a328", "title": "Target Tracking Based on Spatiotemporal Saliency and Multiscale Appearance Cue Fusion", "doi": null, "abstractUrl": "/proceedings-article/icnisc/2022/535100a328/1KYt0aYr3by", "parentPublication": { "id": "proceedings/icnisc/2022/5351/0", "title": "2022 8th Annual International Conference on Network and Information Systems for Computers (ICNISC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/9.346E294", "title": "DSAG: A Scalable Deep Framework for Action-Conditioned Multi-Actor Full Body Motion Synthesis", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/9.346E294/1L8qhSqpWrS", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807296", "title": "GenerativeMap: Visualization and Exploration of Dynamic Density Maps via Generative Learning Model", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807296/1cG6usdi8aQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a467", "title": "Spatiotemporal Phenomena Summarization through 
Static Visual Narratives", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a467/1rSRaNwIpFK", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552191", "title": "DDLVis: Real-time Visual Query of Spatiotemporal Data Distribution via Density Dictionary Learning", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552191/1xic2jmfPOg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091425", "articleId": "13rRUwgyOjk", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091455", "articleId": "13rRUx0xPTS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0xPTS", "doi": "10.1109/TVCG.2013.7", "abstract": "In this paper, Cosine-Weighted B-spline (CWB) filters are proposed for interpolation on the optimal Body-Centered Cubic (BCC) lattice. We demonstrate that our CWB filters can well exploit the fast trilinear texture-fetching capability of modern GPUs, and outperform the state-of-the-art box-spline filters not just in terms of efficiency, but in terms of visual quality and numerical accuracy as well. Furthermore, we rigorously show that the CWB filters are better tailored to the BCC lattice than the previously proposed quasi-interpolating BCC B-spline filters, because they form a Riesz basis; exactly reproduce the original signal at the lattice points; but still provide the same approximation order.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, Cosine-Weighted B-spline (CWB) filters are proposed for interpolation on the optimal Body-Centered Cubic (BCC) lattice. We demonstrate that our CWB filters can well exploit the fast trilinear texture-fetching capability of modern GPUs, and outperform the state-of-the-art box-spline filters not just in terms of efficiency, but in terms of visual quality and numerical accuracy as well. 
Furthermore, we rigorously show that the CWB filters are better tailored to the BCC lattice than the previously proposed quasi-interpolating BCC B-spline filters, because they form a Riesz basis; exactly reproduce the original signal at the lattice points; but still provide the same approximation order.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, Cosine-Weighted B-spline (CWB) filters are proposed for interpolation on the optimal Body-Centered Cubic (BCC) lattice. We demonstrate that our CWB filters can well exploit the fast trilinear texture-fetching capability of modern GPUs, and outperform the state-of-the-art box-spline filters not just in terms of efficiency, but in terms of visual quality and numerical accuracy as well. Furthermore, we rigorously show that the CWB filters are better tailored to the BCC lattice than the previously proposed quasi-interpolating BCC B-spline filters, because they form a Riesz basis; exactly reproduce the original signal at the lattice points; but still provide the same approximation order.", "title": "Cosine-Weighted B-Spline Interpolation: A Fast and High-Quality Reconstruction Scheme for the Body-Centered Cubic Lattice", "normalizedTitle": "Cosine-Weighted B-Spline Interpolation: A Fast and High-Quality Reconstruction Scheme for the Body-Centered Cubic Lattice", "fno": "ttg2013091455", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Lattices", "Splines Mathematics", "Interpolation", "Passband", "Kernel", "Image Reconstruction", "Frequency Response", "Volume Visualization", "Filtering", "Sampling" ], "authors": [ { "givenName": "B.", "surname": "Csebfalvi", "fullName": "B. Csebfalvi", "affiliation": "Dept. of Control Eng. & Inf. Technol., Budapest Univ. of Technol. 
& Econ., Budapest, Hungary", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1455-1466", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2017/0831/0/0831a405", "title": "GC1 Cubic Trigonometric Spline Function with its Geometric Attributes", "doi": null, "abstractUrl": "/proceedings-article/iv/2017/0831a405/12OmNAlvHMJ", "parentPublication": { "id": "proceedings/iv/2017/0831/0", "title": "2017 21st International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880011", "title": "Linear and Cubic Box Splines for the Body Centered Cubic Lattice", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880011/12OmNvAiScO", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/1999/0185/0/01850188", "title": "Monotonic Cubic Spline Interpolation", "doi": null, "abstractUrl": "/proceedings-article/cgi/1999/01850188/12OmNynsbvs", "parentPublication": { "id": "proceedings/cgi/1999/0185/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2012/4899/0/4899a315", "title": "Quasi-interpolation for Volumetric Data Reconstruction in S_4^2(Delta_3)", "doi": null, "abstractUrl": "/proceedings-article/icdh/2012/4899a315/12OmNzcPAbv", "parentPublication": { "id": "proceedings/icdh/2012/4899/0", "title": "4th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2010/03/ttg2010030499", "title": "An Evaluation of Prefiltered B-Spline Reconstruction for Quasi-Interpolation on the Body-Centered Cubic Lattice", "doi": null, "abstractUrl": "/journal/tg/2010/03/ttg2010030499/13rRUEgarBn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/02/ttg2013020319", "title": "Quartic Box-Spline Reconstruction on the BCC Lattice", "doi": null, "abstractUrl": "/journal/tg/2013/02/ttg2013020319/13rRUxC0SvT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/02/ttg2008020313", "title": "Practical Box Splines for Reconstruction on the Body Centered Cubic Lattice", "doi": null, "abstractUrl": "/journal/tg/2008/02/ttg2008020313/13rRUxZRbnW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1337", "title": "Extensions of the Zwart-Powell Box Spline for Volumetric Data Reconstruction on the Cartesian Lattice", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1337/13rRUxjQybK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/06/ttg2008061523", "title": "Box Spline Reconstruction On The Face-Centered Cubic Lattice", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061523/13rRUy0qnLC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/iv-2/2019/2850/0/285000a156", "title": "Cubic B-Spline Curve Interpolation with Arbitrary Derivatives on its Data Points", "doi": null, "abstractUrl": "/proceedings-article/iv-2/2019/285000a156/1cMEQEhYBC8", "parentPublication": { "id": "proceedings/iv-2/2019/2850/0", "title": "2019 23rd International Conference in Information Visualization – Part II", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091438", "articleId": "13rRUxASuGk", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091467", "articleId": "13rRUxASubz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgOk", "name": "ttg2013091455s.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013091455s.pdf", "extension": "pdf", "size": "64.8 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxASubz", "doi": "10.1109/TVCG.2013.22", "abstract": "Many algorithms have been proposed for the task of efficient compression of triangular meshes. Geometric properties of the input data are usually exploited to obtain an accurate prediction of the data at the decoder. Considerations on how to improve the prediction usually focus on its normal part, assuming that the tangential part behaves similarly. In this paper, we show that knowledge of vertex valences might allow the decoder to form a prediction that is more accurate in the tangential direction, using a weighted parallelogram prediction. This idea can be easily implemented into existing compression algorithms, such as Edgebreaker, and it can be applied at different levels of sophistication, from very simple ones, that are computationally very cheap, to some more complex ones that provide an even better compression efficiency.", "abstracts": [ { "abstractType": "Regular", "content": "Many algorithms have been proposed for the task of efficient compression of triangular meshes. Geometric properties of the input data are usually exploited to obtain an accurate prediction of the data at the decoder. Considerations on how to improve the prediction usually focus on its normal part, assuming that the tangential part behaves similarly. In this paper, we show that knowledge of vertex valences might allow the decoder to form a prediction that is more accurate in the tangential direction, using a weighted parallelogram prediction. 
This idea can be easily implemented into existing compression algorithms, such as Edgebreaker, and it can be applied at different levels of sophistication, from very simple ones, that are computationally very cheap, to some more complex ones that provide an even better compression efficiency.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Many algorithms have been proposed for the task of efficient compression of triangular meshes. Geometric properties of the input data are usually exploited to obtain an accurate prediction of the data at the decoder. Considerations on how to improve the prediction usually focus on its normal part, assuming that the tangential part behaves similarly. In this paper, we show that knowledge of vertex valences might allow the decoder to form a prediction that is more accurate in the tangential direction, using a weighted parallelogram prediction. This idea can be easily implemented into existing compression algorithms, such as Edgebreaker, and it can be applied at different levels of sophistication, from very simple ones, that are computationally very cheap, to some more complex ones that provide an even better compression efficiency.", "title": "Exploiting Connectivity to Improve the Tangential Part of Geometry Prediction", "normalizedTitle": "Exploiting Connectivity to Improve the Tangential Part of Geometry Prediction", "fno": "ttg2013091467", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Prediction Algorithms", "Geometry", "Decoding", "Encoding", "Shape", "Equations", "Predictive Models", "Valence", "Compression", "Mesh", "Triangle", "Parallelogram", "Prediction" ], "authors": [ { "givenName": "L.", "surname": "Vasa", "fullName": "L. Vasa", "affiliation": "Fak. fur Inf., Tech. Univ. Chemnitz, Chemnitz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "G.", "surname": "Brunnett", "fullName": "G. Brunnett", "affiliation": "Fak. fur Inf., Tech. Univ. 
Chemnitz, Chemnitz, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1467-1475", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pg/2002/1784/0/17840294", "title": "A Divide and Conquer Algorithm for Triangle Mesh Connectivity Encoding", "doi": null, "abstractUrl": "/proceedings-article/pg/2002/17840294/12OmNAhxjAW", "parentPublication": { "id": "proceedings/pg/2002/1784/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smi/2003/1845/0/18450059", "title": "Higher Order Prediction for Geometry Compression", "doi": null, "abstractUrl": "/proceedings-article/smi/2003/18450059/12OmNx8wTjc", "parentPublication": { "id": "proceedings/smi/2003/1845/0", "title": "Shape Modeling and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2017/6721/0/07921914", "title": "Cluster Adapted Signalling for Intra Prediction in HEVC", "doi": null, "abstractUrl": "/proceedings-article/dcc/2017/07921914/12OmNy314iz", "parentPublication": { "id": "proceedings/dcc/2017/6721/0", "title": "2017 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2010/4222/0/4222a184", "title": "Compression of 3D Triangle Meshes with a Generalized Parallelogram Prediction Scheme Based on Vector Quantization", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2010/4222a184/12OmNyU63rP", "parentPublication": { "id": "proceedings/iih-msp/2010/4222/0", "title": "Intelligent Information Hiding and Multimedia 
Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06012143", "title": "Improved intra mode signaling for HEVC", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06012143/12OmNyUWQWL", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2005/2296/0/22960583", "title": "An Efficient Connectivity Compression for Triangular Meshes", "doi": null, "abstractUrl": "/proceedings-article/icis/2005/22960583/12OmNyvY9BE", "parentPublication": { "id": "proceedings/icis/2005/2296/0", "title": "Proceedings. Fourth Annual ACIS International Conference on Computer and Information Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2010/10/05256142", "title": "Correlation-Based Rectangular Encoding", "doi": null, "abstractUrl": "/journal/si/2010/10/05256142/13rRUwkfAWO", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486481", "title": "Scalable Point Cloud Geometry Coding with Binary Tree Embedded Quadtree", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486481/14jQfO9lbCW", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/09968173", "title": "Sparse Tensor-Based Multiscale Representation for Point Cloud Geometry Compression", "doi": null, "abstractUrl": "/journal/tp/5555/01/09968173/1IKD7VXXRhm", "parentPublication": { "id": 
"trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102799", "title": "Decoder-Side Intra Mode Derivation For Next Generation Video Coding", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102799/1kwrkC8YnSM", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091455", "articleId": "13rRUx0xPTS", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091476", "articleId": "13rRUwghd98", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwghd98", "doi": "10.1109/TVCG.2013.62", "abstract": "Image-space line integral convolution (LIC) is a popular scheme for visualizing surface vector fields due to its simplicity and high efficiency. To avoid inconsistencies or color blur during the user interactions, existing approaches employ surface parameterization or 3D volume texture schemes. However, they often require expensive computation or memory cost, and cannot achieve consistent results in terms of both the granularity and color distribution on different scales. This paper introduces a novel image-space surface flow visualization approach that preserves the coherence during user interactions. To make the noise texture under different viewpoints coherent, we propose to precompute a sequence of mipmap noise textures in a coarse-to-fine manner for consistent transition, and map the textures onto each triangle with randomly assigned and constant texture coordinates. Further, a standard image-space LIC is performed to generate the flow texture. The proposed approach is simple and GPU-friendly, and can be easily combined with various texture-based flow visualization techniques. By leveraging viewpoint-dependent backward tracing and mipmap noise phase, our method can be incorporated with the image-based flow visualization (IBFV) technique for coherent visualization of unsteady flows. We demonstrate consistent and highly efficient flow visualization on a variety of data sets.", "abstracts": [ { "abstractType": "Regular", "content": "Image-space line integral convolution (LIC) is a popular scheme for visualizing surface vector fields due to its simplicity and high efficiency. 
To avoid inconsistencies or color blur during the user interactions, existing approaches employ surface parameterization or 3D volume texture schemes. However, they often require expensive computation or memory cost, and cannot achieve consistent results in terms of both the granularity and color distribution on different scales. This paper introduces a novel image-space surface flow visualization approach that preserves the coherence during user interactions. To make the noise texture under different viewpoints coherent, we propose to precompute a sequence of mipmap noise textures in a coarse-to-fine manner for consistent transition, and map the textures onto each triangle with randomly assigned and constant texture coordinates. Further, a standard image-space LIC is performed to generate the flow texture. The proposed approach is simple and GPU-friendly, and can be easily combined with various texture-based flow visualization techniques. By leveraging viewpoint-dependent backward tracing and mipmap noise phase, our method can be incorporated with the image-based flow visualization (IBFV) technique for coherent visualization of unsteady flows. We demonstrate consistent and highly efficient flow visualization on a variety of data sets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Image-space line integral convolution (LIC) is a popular scheme for visualizing surface vector fields due to its simplicity and high efficiency. To avoid inconsistencies or color blur during the user interactions, existing approaches employ surface parameterization or 3D volume texture schemes. However, they often require expensive computation or memory cost, and cannot achieve consistent results in terms of both the granularity and color distribution on different scales. This paper introduces a novel image-space surface flow visualization approach that preserves the coherence during user interactions. 
To make the noise texture under different viewpoints coherent, we propose to precompute a sequence of mipmap noise textures in a coarse-to-fine manner for consistent transition, and map the textures onto each triangle with randomly assigned and constant texture coordinates. Further, a standard image-space LIC is performed to generate the flow texture. The proposed approach is simple and GPU-friendly, and can be easily combined with various texture-based flow visualization techniques. By leveraging viewpoint-dependent backward tracing and mipmap noise phase, our method can be incorporated with the image-based flow visualization (IBFV) technique for coherent visualization of unsteady flows. We demonstrate consistent and highly efficient flow visualization on a variety of data sets.", "title": "Image-Space Texture-Based Output-Coherent Surface Flow Visualization", "normalizedTitle": "Image-Space Texture-Based Output-Coherent Surface Flow Visualization", "fno": "ttg2013091476", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Vectors", "Surface Texture", "Image Color Analysis", "Colored Noise", "Image Resolution", "Unsteady Flows", "Flow Visualization", "Mipmap", "LIC", "IBFV", "Surface Flows" ], "authors": [ { "givenName": null, "surname": "Jin Huang", "fullName": "Jin Huang", "affiliation": "State Key Lab. of CAD & CG, Zhejiang Univ., Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Zherong Pan", "fullName": "Zherong Pan", "affiliation": "State Key Lab. of CAD & CG, Zhejiang Univ., Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Guoning Chen", "fullName": "Guoning Chen", "affiliation": "Dept. of Comput. Sci., Univ. of Houston, Houston, TX, USA", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Wei Chen", "fullName": "Wei Chen", "affiliation": "State Key Lab. 
of CAD & CG, Zhejiang Univ., Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Hujun Bao", "fullName": "Hujun Bao", "affiliation": "State Key Lab. of CAD & CG, Zhejiang Univ., Hangzhou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1476-1487", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2000/0743/0/07430303", "title": "Automatic Generation of Hair Texture with Line Integral Convolution", "doi": null, "abstractUrl": "/proceedings-article/iv/2000/07430303/12OmNAYoKl4", "parentPublication": { "id": "proceedings/iv/2000/0743/0", "title": "2000 IEEE Conference on Information Visualization. An International Conference on Computer Visualization and Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisp/2008/3119/2/3119b145", "title": "Super Resolution of 3D Surface Texture Based on Eigen Images", "doi": null, "abstractUrl": "/proceedings-article/cisp/2008/3119b145/12OmNBC8AAT", "parentPublication": { "id": "proceedings/cisp/2008/3119/3", "title": "Image and Signal Processing, Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscsct/2008/3498/2/3498b230", "title": "3D Surface Texture Synthesis Based on Wavelet Transform", "doi": null, "abstractUrl": "/proceedings-article/iscsct/2008/3498b230/12OmNBRsVxV", "parentPublication": { "id": "proceedings/iscsct/2008/3498/1", "title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/58970038", "title": "Interactive 
Exploration of Volume Line Integral Convolution Based on 3D-Texture Mapping", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970038/12OmNCdk2MV", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1995/7042/0/70420876", "title": "Recovering object surfaces from viewed changes in surface texture patterns", "doi": null, "abstractUrl": "/proceedings-article/iccv/1995/70420876/12OmNvT2p2H", "parentPublication": { "id": "proceedings/iccv/1995/7042/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300015", "title": "A Texture-Based Framework for Spacetime-Coherent Visualization of Time-Dependent Vector Fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300015/12OmNyv7mgw", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2012/0863/0/06183584", "title": "Output-coherent image-space LIC for surface flow visualization", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2012/06183584/12OmNzw8jdp", "parentPublication": { "id": "proceedings/pacificvis/2012/0863/0", "title": "Visualization Symposium, IEEE Pacific", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/05/ttg2012050783", "title": "A 2D Flow Visualization User Study Using Explicit Flow Synthesis and Implicit Task Design", "doi": null, "abstractUrl": "/journal/tg/2012/05/ttg2012050783/13rRUwI5Ug5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040820", "title": "Output-Sensitive 3D Line Integral Convolution", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040820/13rRUwghd94", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2013/06/mcs2013060096", "title": "Texture-Based Flow Visualization", "doi": null, "abstractUrl": "/magazine/cs/2013/06/mcs2013060096/13rRUwh80yj", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091467", "articleId": "13rRUxASubz", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091488", "articleId": "13rRUwIF69j", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgwZ", "name": "ttg2013091476s.avi", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013091476s.avi", "extension": "avi", "size": "23.4 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwIF69j", "doi": "10.1109/TVCG.2013.44", "abstract": "Hardware tessellation is one of the latest GPU features. Triangle or quad meshes are tessellated on-the-fly, where the tessellation level is chosen adaptively in a separate shader. The hardware tessellator only generates topology; attributes such as positions or texture coordinates of the newly generated vertices are determined in a domain shader. Typical applications of hardware tessellation are view dependent tessellation of parametric surfaces and displacement mapping. Often, the attributes for the newly generated vertices are stored in textures, which requires uv unwrapping, chartification, and atlas generation of the input mesh—a process that is time consuming and often requires manual intervention. In this paper, we present an alternative representation that directly stores optimized attribute values for typical hardware tessellation patterns and simply assigns these attributes to the generated vertices at render time. Using a multilevel fitting approach, the attribute values are optimized for several resolutions. Thereby, we require no parameterization, save memory by adapting the density of the samples to the content, and avoid discontinuities by construction. Our representation is optimally suited for displacement mapping: it automatically generates seamless, view-dependent displacement mapped models. The multilevel fitting approach generates better low-resolution displacement maps than simple downfiltering. By properly blending levels, we avoid artifacts such as popping or swimming surfaces. We also show other possible applications such as signal-optimized texturing or light baking. 
Our representation can be evaluated in a pixel shader, resulting in signal adaptive, parameterization-free texturing, comparable to PTex or Mesh Colors. Performance evaluation shows that our representation is on par with standard texture mapping and can be updated in real time, allowing for application such as interactive sculpting.", "abstracts": [ { "abstractType": "Regular", "content": "Hardware tessellation is one of the latest GPU features. Triangle or quad meshes are tessellated on-the-fly, where the tessellation level is chosen adaptively in a separate shader. The hardware tessellator only generates topology; attributes such as positions or texture coordinates of the newly generated vertices are determined in a domain shader. Typical applications of hardware tessellation are view dependent tessellation of parametric surfaces and displacement mapping. Often, the attributes for the newly generated vertices are stored in textures, which requires uv unwrapping, chartification, and atlas generation of the input mesh—a process that is time consuming and often requires manual intervention. In this paper, we present an alternative representation that directly stores optimized attribute values for typical hardware tessellation patterns and simply assigns these attributes to the generated vertices at render time. Using a multilevel fitting approach, the attribute values are optimized for several resolutions. Thereby, we require no parameterization, save memory by adapting the density of the samples to the content, and avoid discontinuities by construction. Our representation is optimally suited for displacement mapping: it automatically generates seamless, view-dependent displacement mapped models. The multilevel fitting approach generates better low-resolution displacement maps than simple downfiltering. By properly blending levels, we avoid artifacts such as popping or swimming surfaces. 
We also show other possible applications such as signal-optimized texturing or light baking. Our representation can be evaluated in a pixel shader, resulting in signal adaptive, parameterization-free texturing, comparable to PTex or Mesh Colors. Performance evaluation shows that our representation is on par with standard texture mapping and can be updated in real time, allowing for application such as interactive sculpting.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Hardware tessellation is one of the latest GPU features. Triangle or quad meshes are tessellated on-the-fly, where the tessellation level is chosen adaptively in a separate shader. The hardware tessellator only generates topology; attributes such as positions or texture coordinates of the newly generated vertices are determined in a domain shader. Typical applications of hardware tessellation are view dependent tessellation of parametric surfaces and displacement mapping. Often, the attributes for the newly generated vertices are stored in textures, which requires uv unwrapping, chartification, and atlas generation of the input mesh—a process that is time consuming and often requires manual intervention. In this paper, we present an alternative representation that directly stores optimized attribute values for typical hardware tessellation patterns and simply assigns these attributes to the generated vertices at render time. Using a multilevel fitting approach, the attribute values are optimized for several resolutions. Thereby, we require no parameterization, save memory by adapting the density of the samples to the content, and avoid discontinuities by construction. Our representation is optimally suited for displacement mapping: it automatically generates seamless, view-dependent displacement mapped models. The multilevel fitting approach generates better low-resolution displacement maps than simple downfiltering. 
By properly blending levels, we avoid artifacts such as popping or swimming surfaces. We also show other possible applications such as signal-optimized texturing or light baking. Our representation can be evaluated in a pixel shader, resulting in signal adaptive, parameterization-free texturing, comparable to PTex or Mesh Colors. Performance evaluation shows that our representation is on par with standard texture mapping and can be updated in real time, allowing for application such as interactive sculpting.", "title": "Multiresolution Attributes for Hardware Tessellated Objects", "normalizedTitle": "Multiresolution Attributes for Hardware Tessellated Objects", "fno": "ttg2013091488", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Face", "Indexes", "Hardware", "Image Color Analysis", "Rendering Computer Graphics", "Interpolation", "Pipelines", "Displacement Mapping", "Signal Dependent Storage", "Hardware Tessellation" ], "authors": [ { "givenName": "H.", "surname": "Schaefer", "fullName": "H. Schaefer", "affiliation": "Univ. of Erlangen-Nuremberg, Erlangen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Prus", "fullName": "M. Prus", "affiliation": "Univ. of Erlangen-Nuremberg, Erlangen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Q.", "surname": "Meyer", "fullName": "Q. Meyer", "affiliation": "Elektrobit Automotive GmbH, Erlangen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "J.", "surname": "Suessmuth", "fullName": "J. Suessmuth", "affiliation": "Univ. of Erlangen-Nuremberg, Erlangen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Stamminger", "fullName": "M. Stamminger", "affiliation": "Univ. 
of Erlangen-Nuremberg, Erlangen, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1488-1498", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vg/2005/26/0/01500541", "title": "A simple and flexible volume rendering framework for graphics-hardware-based raycasting", "doi": null, "abstractUrl": "/proceedings-article/vg/2005/01500541/12OmNARiM8d", "parentPublication": { "id": "proceedings/vg/2005/26/0", "title": "Volume Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitcs/2014/6541/0/07021710", "title": "A Height-Map Based Terrain Rendering with Tessellation Hardware", "doi": null, "abstractUrl": "/proceedings-article/icitcs/2014/07021710/12OmNBE7Ms6", "parentPublication": { "id": "proceedings/icitcs/2014/6541/0", "title": "2014 International Conference on IT Convergence and Security (ICITCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pbg/2005/20/0/01500315", "title": "Voronoi rasterization of sparse point sets", "doi": null, "abstractUrl": "/proceedings-article/pbg/2005/01500315/12OmNBfZSmM", "parentPublication": { "id": "proceedings/pbg/2005/20/0", "title": "Point-Based Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2016/4400/0/4400a267", "title": "A Mesh Reconstruction Method Based on View Maps", "doi": null, "abstractUrl": "/proceedings-article/icdh/2016/4400a267/12OmNBoNrqY", "parentPublication": { "id": "proceedings/icdh/2016/4400/0", "title": "2016 6th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vg/2005/26/0/01500532", "title": "Texturing and hypertexturing of volumetric objects", "doi": null, "abstractUrl": "/proceedings-article/vg/2005/01500532/12OmNBpVQ7X", "parentPublication": { "id": "proceedings/vg/2005/26/0", "title": "Volume Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2016/2303/0/2303a057", "title": "Interactive Screenspace Stream-Compaction Fragment Rendering of Direct Illumination from Area Lights", "doi": null, "abstractUrl": "/proceedings-article/cw/2016/2303a057/12OmNCdk2W8", "parentPublication": { "id": "proceedings/cw/2016/2303/0", "title": "2016 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2004/2171/0/21710518", "title": "Adaptation-Based Individualized Face Modeling for Animation Using Displacement Map", "doi": null, "abstractUrl": "/proceedings-article/cgi/2004/21710518/12OmNyPQ4xb", "parentPublication": { "id": "proceedings/cgi/2004/2171/0", "title": "Proceedings. 
Computer Graphics International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2009/3789/0/05298385", "title": "GPU Supported Patch-Based Tessellation for Dual Subdivision", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2009/05298385/12OmNyQ7G37", "parentPublication": { "id": "proceedings/cgiv/2009/3789/0", "title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/03/ttg2013030420", "title": "IDSS: A Novel Representation for Woven Fabrics", "doi": null, "abstractUrl": "/journal/tg/2013/03/ttg2013030420/13rRUxCitJb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pdp/2022/6958/0/695800a144", "title": "DTM-NUCA: Dynamic Texture Mapping-NUCA for Energy-Efficient Graphics Rendering", "doi": null, "abstractUrl": "/proceedings-article/pdp/2022/695800a144/1CFRWe5041y", "parentPublication": { "id": "proceedings/pdp/2022/6958/0", "title": "2022 30th Euromicro International Conference on Parallel, Distributed and Network-Based Processing (PDP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091476", "articleId": "13rRUwghd98", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091499", "articleId": "13rRUyfKIHN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgBB", "name": "ttg2013091488s.mov", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013091488s.mov", "extension": "mov", "size": "35.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyfKIHN", "doi": "10.1109/TVCG.2013.61", "abstract": "In this paper, we introduce ParaGlide, a visualization system designed for interactive exploration of parameter spaces of multidimensional simulation models. To get the right parameter configuration, model developers frequently have to go back and forth between setting input parameters and qualitatively judging the outcomes of their model. Current state-of-the-art tools and practices, however, fail to provide a systematic way of exploring these parameter spaces, making informed decisions about parameter configurations a tedious and workload-intensive task. ParaGlide endeavors to overcome this shortcoming by guiding data generation using a region-based user interface for parameter sampling and then dividing the model's input parameter space into partitions that represent distinct output behavior. In particular, we found that parameter space partitioning can help model developers to better understand qualitative differences among possibly high-dimensional model outputs. Further, it provides information on parameter sensitivity and facilitates comparison of models. We developed ParaGlide in close collaboration with experts from three different domains, who all were involved in developing new models for their domain. 
We first analyzed current practices of six domain experts and derived a set of tasks and design requirements, then engaged in a user-centered design process, and finally conducted three longitudinal in-depth case studies underlining the usefulness of our approach.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we introduce ParaGlide, a visualization system designed for interactive exploration of parameter spaces of multidimensional simulation models. To get the right parameter configuration, model developers frequently have to go back and forth between setting input parameters and qualitatively judging the outcomes of their model. Current state-of-the-art tools and practices, however, fail to provide a systematic way of exploring these parameter spaces, making informed decisions about parameter configurations a tedious and workload-intensive task. ParaGlide endeavors to overcome this shortcoming by guiding data generation using a region-based user interface for parameter sampling and then dividing the model's input parameter space into partitions that represent distinct output behavior. In particular, we found that parameter space partitioning can help model developers to better understand qualitative differences among possibly high-dimensional model outputs. Further, it provides information on parameter sensitivity and facilitates comparison of models. We developed ParaGlide in close collaboration with experts from three different domains, who all were involved in developing new models for their domain. 
We first analyzed current practices of six domain experts and derived a set of tasks and design requirements, then engaged in a user-centered design process, and finally conducted three longitudinal in-depth case studies underlining the usefulness of our approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we introduce ParaGlide, a visualization system designed for interactive exploration of parameter spaces of multidimensional simulation models. To get the right parameter configuration, model developers frequently have to go back and forth between setting input parameters and qualitatively judging the outcomes of their model. Current state-of-the-art tools and practices, however, fail to provide a systematic way of exploring these parameter spaces, making informed decisions about parameter configurations a tedious and workload-intensive task. ParaGlide endeavors to overcome this shortcoming by guiding data generation using a region-based user interface for parameter sampling and then dividing the model's input parameter space into partitions that represent distinct output behavior. In particular, we found that parameter space partitioning can help model developers to better understand qualitative differences among possibly high-dimensional model outputs. Further, it provides information on parameter sensitivity and facilitates comparison of models. We developed ParaGlide in close collaboration with experts from three different domains, who all were involved in developing new models for their domain. 
We first analyzed current practices of six domain experts and derived a set of tasks and design requirements, then engaged in a user-centered design process, and finally conducted three longitudinal in-depth case studies underlining the usefulness of our approach.", "title": "ParaGlide: Interactive Parameter Space Partitioning for Computer Simulations", "normalizedTitle": "ParaGlide: Interactive Parameter Space Partitioning for Computer Simulations", "fno": "ttg2013091499", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Modeling", "Biological System Modeling", "Data Models", "Analytical Models", "Image Segmentation", "Mathematical Model", "Animals", "Interactive Visual Analysis", "Computer Simulation", "Parameter Space Partitioning", "Region Based Experimental Design", "Similarity Based Embedding" ], "authors": [ { "givenName": "S.", "surname": "Bergner", "fullName": "S. Bergner", "affiliation": "Dept. of Comput. Sci., Simon Fraser Univ., Burnaby, BC, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Sedlmair", "fullName": "M. Sedlmair", "affiliation": "Fak. fur Inf., Univ. Wien, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "T.", "surname": "Moller", "fullName": "T. Moller", "affiliation": "Fak. fur Inf., Univ. Wien, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "S. N.", "surname": "Abdolyousefi", "fullName": "S. N. Abdolyousefi", "affiliation": "Dept. of Math., Simon Fraser Univ., Burnaby, BC, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "A.", "surname": "Saad", "fullName": "A. Saad", "affiliation": "Dept. of Comput. 
Sci., Simon Fraser Univ., Burnaby, BC, Canada", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1499-1512", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tb/2020/01/08374844", "title": "Using Emulation to Engineer and Understand Simulations of Biological Systems", "doi": null, "abstractUrl": "/journal/tb/2020/01/08374844/13rRUwjGoKb", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06876043", "title": "Visual Parameter Space Analysis: A Conceptual Framework", "doi": null, "abstractUrl": "/journal/tg/2014/12/06876043/13rRUytF41C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440838", "title": "Drag and Track: A Direct Manipulation Interface for Contextualizing Data Instances within a Continuous Parameter Space", "doi": null, "abstractUrl": "/journal/tg/2019/01/08440838/17D45Wt3Exw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08464305", "title": "RegressionExplorer: Interactive Exploration of Logistic Regression Models with Subgroup Analysis", "doi": null, "abstractUrl": "/journal/tg/2019/01/08464305/17D45Xtvpee", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "mags/cg/2022/03/09763014", "title": "DLA-VPS: Deep-Learning-Assisted Visual Parameter Space Analysis of Cosmological Simulations", "doi": null, "abstractUrl": "/magazine/cg/2022/03/09763014/1CT51kfyJhe", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/06/09751203", "title": "GNN-Surrogate: A Hierarchical and Adaptive Graph Neural Network for Parameter Space Exploration of Unstructured-Mesh Ocean Simulations", "doi": null, "abstractUrl": "/journal/tg/2022/06/09751203/1CnxNEIPqE0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09904429", "title": "VDL-Surrogate: A View-Dependent Latent-based Model for Parameter Space Exploration of Ensemble Simulations", "doi": null, "abstractUrl": "/journal/tg/2023/01/09904429/1H1gjOQxk40", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08805426", "title": "InSituNet: Deep Image Synthesis for Parameter Space Exploration of Ensemble Simulations", "doi": null, "abstractUrl": "/journal/tg/2020/01/08805426/1cG4qBnK6Fq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2020/6215/0/09313311", "title": "BioMETA: A multiple specification parameter estimation system for stochastic biochemical models", "doi": null, "abstractUrl": "/proceedings-article/bibm/2020/09313311/1qmfSB289na", "parentPublication": { "id": "proceedings/bibm/2020/6215/0", "title": "2020 IEEE 
International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2021/3574/0/357400a984", "title": "Machine Learning-assisted Computational Steering of Large-scale Scientific Simulations", "doi": null, "abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2021/357400a984/1zxLcFK6w5W", "parentPublication": { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2021/3574/0", "title": "2021 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091488", "articleId": "13rRUwIF69j", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091513", "articleId": "13rRUxD9gXH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRKc", "name": "ttg2013091499s.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013091499s.zip", "extension": "zip", "size": "5.92 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxD9gXH", "doi": "10.1109/TVCG.2013.71", "abstract": "We present a novel, linear programming (LP)-based scheduling algorithm that exploits heterogeneous multicore architectures such as CPUs and GPUs to accelerate a wide variety of proximity queries. To represent complicated performance relationships between heterogeneous architectures and different computations of proximity queries, we propose a simple, yet accurate model that measures the expected running time of these computations. Based on this model, we formulate an optimization problem that minimizes the largest time spent on computing resources, and propose a novel, iterative LP-based scheduling algorithm. Since our method is general, we are able to apply our method into various proximity queries used in five different applications that have different characteristics. Our method achieves an order of magnitude performance improvement by using four different GPUs and two hexa-core CPUs over using a hexa-core CPU only. Unlike prior scheduling methods, our method continually improves the performance, as we add more computing resources. Also, our method achieves much higher performance improvement compared with prior methods as heterogeneity of computing resources is increased. Moreover, for one of tested applications, our method achieves even higher performance than a prior parallel method optimized manually for the application. We also show that our method provides results that are close (e.g., 75 percent) to the performance provided by a conservative upper bound of the ideal throughput. These results demonstrate the efficiency and robustness of our algorithm that have not been achieved by prior methods. 
In addition, we integrate one of our contributions with a work stealing method. Our version of the work stealing method achieves 18 percent performance improvement on average over the original work stealing method. This result shows wide applicability of our approach.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel, linear programming (LP)-based scheduling algorithm that exploits heterogeneous multicore architectures such as CPUs and GPUs to accelerate a wide variety of proximity queries. To represent complicated performance relationships between heterogeneous architectures and different computations of proximity queries, we propose a simple, yet accurate model that measures the expected running time of these computations. Based on this model, we formulate an optimization problem that minimizes the largest time spent on computing resources, and propose a novel, iterative LP-based scheduling algorithm. Since our method is general, we are able to apply our method into various proximity queries used in five different applications that have different characteristics. Our method achieves an order of magnitude performance improvement by using four different GPUs and two hexa-core CPUs over using a hexa-core CPU only. Unlike prior scheduling methods, our method continually improves the performance, as we add more computing resources. Also, our method achieves much higher performance improvement compared with prior methods as heterogeneity of computing resources is increased. Moreover, for one of tested applications, our method achieves even higher performance than a prior parallel method optimized manually for the application. We also show that our method provides results that are close (e.g., 75 percent) to the performance provided by a conservative upper bound of the ideal throughput. These results demonstrate the efficiency and robustness of our algorithm that have not been achieved by prior methods. 
In addition, we integrate one of our contributions with a work stealing method. Our version of the work stealing method achieves 18 percent performance improvement on average over the original work stealing method. This result shows wide applicability of our approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel, linear programming (LP)-based scheduling algorithm that exploits heterogeneous multicore architectures such as CPUs and GPUs to accelerate a wide variety of proximity queries. To represent complicated performance relationships between heterogeneous architectures and different computations of proximity queries, we propose a simple, yet accurate model that measures the expected running time of these computations. Based on this model, we formulate an optimization problem that minimizes the largest time spent on computing resources, and propose a novel, iterative LP-based scheduling algorithm. Since our method is general, we are able to apply our method into various proximity queries used in five different applications that have different characteristics. Our method achieves an order of magnitude performance improvement by using four different GPUs and two hexa-core CPUs over using a hexa-core CPU only. Unlike prior scheduling methods, our method continually improves the performance, as we add more computing resources. Also, our method achieves much higher performance improvement compared with prior methods as heterogeneity of computing resources is increased. Moreover, for one of tested applications, our method achieves even higher performance than a prior parallel method optimized manually for the application. We also show that our method provides results that are close (e.g., 75 percent) to the performance provided by a conservative upper bound of the ideal throughput. These results demonstrate the efficiency and robustness of our algorithm that have not been achieved by prior methods. 
In addition, we integrate one of our contributions with a work stealing method. Our version of the work stealing method achieves 18 percent performance improvement on average over the original work stealing method. This result shows wide applicability of our approach.", "title": "Scheduling in Heterogeneous Computing Environments for Proximity Queries", "normalizedTitle": "Scheduling in Heterogeneous Computing Environments for Proximity Queries", "fno": "ttg2013091513", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Modeling", "Multicore Processing", "Scheduling Algorithms", "Optimization", "Acceleration", "Motion Planning", "Heterogeneous System", "Proximity Query", "Scheduling", "Collision Detection", "Ray Tracing" ], "authors": [ { "givenName": null, "surname": "Duksu Kim", "fullName": "Duksu Kim", "affiliation": "Dept. of Comput. Sci., Korea Adv. Inst. of Sci. & Technol., Daejeon, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Jinkyu Lee", "fullName": "Jinkyu Lee", "affiliation": "Dept. of Electr. Eng. & Comput. Sci., Univ. of Michigan, Ann Arbor, MI, USA", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Junghwan Lee", "fullName": "Junghwan Lee", "affiliation": "Dept. of Comput. Sci., Korea Adv. Inst. of Sci. & Technol., Daejeon, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Insik Shin", "fullName": "Insik Shin", "affiliation": "Dept. of Comput. Sci., Korea Adv. Inst. of Sci. & Technol., Daejeon, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "J.", "surname": "Kim", "fullName": "J. Kim", "affiliation": "Dept. of Comput. Sci., Korea Adv. Inst. of Sci. & Technol., Daejeon, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Sung-Eui Yoon", "fullName": "Sung-Eui Yoon", "affiliation": "Dept. of Comput. Sci., Korea Adv. Inst. of Sci. 
& Technol., Daejeon, South Korea", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1513-1525", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ipdpsw/2014/4116/0/4116a642", "title": "EEWA: Energy-Efficient Workload-Aware Task Scheduling in Multi-core Architectures", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2014/4116a642/12OmNvF83pm", "parentPublication": { "id": "proceedings/ipdpsw/2014/4116/0", "title": "2014 IEEE International Parallel & Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmp/2000/0562/0/05620115", "title": "Proximity Queries for Modeling and Manufacturing", "doi": null, "abstractUrl": "/proceedings-article/gmp/2000/05620115/12OmNwE9OJs", "parentPublication": { "id": "proceedings/gmp/2000/0562/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2012/4675/0/4675a249", "title": "WATS: Workload-Aware Task Scheduling in Asymmetric Multi-core Architectures", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2012/4675a249/12OmNx965DX", "parentPublication": { "id": "proceedings/ipdps/2012/4675/0", "title": "Parallel and Distributed Processing Symposium, International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/1995/6910/0/69100147", "title": "Efficient processing of proximity queries for large databases", "doi": null, "abstractUrl": "/proceedings-article/icde/1995/69100147/12OmNxFJXJp", "parentPublication": { "id": "proceedings/icde/1995/6910/0", "title": "Proceedings of the Eleventh 
International Conference on Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccgrid/2015/8006/0/8006a041", "title": "Towards Efficient Work-Stealing in Virtualized Environments", "doi": null, "abstractUrl": "/proceedings-article/ccgrid/2015/8006a041/12OmNym2c4P", "parentPublication": { "id": "proceedings/ccgrid/2015/8006/0", "title": "2015 15th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing (CCGrid)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rtss/2016/5303/0/5303a203", "title": "Randomized Work Stealing for Large Scale Soft Real-Time Systems", "doi": null, "abstractUrl": "/proceedings-article/rtss/2016/5303a203/12OmNzt0IFj", "parentPublication": { "id": "proceedings/rtss/2016/5303/0", "title": "2016 IEEE Real-Time Systems Symposium (RTSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2016/08/07300465", "title": "Robinhood: Towards Efficient Work-Stealing in Virtualized Environments", "doi": null, "abstractUrl": "/journal/td/2016/08/07300465/13rRUB6Sq0j", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2013/12/ttd2013122334", "title": "Adaptive Cache Aware Bitier Work-Stealing in Multisocket Multicore Architectures", "doi": null, "abstractUrl": "/journal/td/2013/12/ttd2013122334/13rRUwbs2g9", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2018/11/08329541", "title": "High-Order Proximity Preserved Embedding for Dynamic Networks", "doi": null, "abstractUrl": "/journal/tk/2018/11/08329541/147pbJSQD4d", "parentPublication": { "id": "trans/tk", "title": "IEEE 
Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2021/3574/0/357400a594", "title": "A Genetic Algorithm for Scheduling in Heterogeneous Multicore System Integrated with FPGA", "doi": null, "abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2021/357400a594/1zxL4ISv8mA", "parentPublication": { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2021/3574/0", "title": "2021 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091499", "articleId": "13rRUyfKIHN", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091526", "articleId": "13rRUxC0SEh", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRFY", "name": "ttg2013091513s1.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013091513s1.pdf", "extension": "pdf", "size": "40 kB", "__typename": "WebExtraType" }, { "id": "17ShDTXWRFZ", "name": "ttg2013091513s2.mov", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013091513s2.mov", "extension": "mov", "size": "25.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxC0SEh", "doi": "10.1109/TVCG.2013.65", "abstract": "We introduce Splatterplots, a novel presentation of scattered data that enables visualizations that scale beyond standard scatter plots. Traditional scatter plots suffer from overdraw (overlapping glyphs) as the number of points per unit area increases. Overdraw obscures outliers, hides data distributions, and makes the relationship among subgroups of the data difficult to discern. To address these issues, Splatterplots abstract away information such that the density of data shown in any unit of screen space is bounded, while allowing continuous zoom to reveal abstracted details. Abstraction automatically groups dense data points into contours and samples remaining points. We combine techniques for abstraction with perceptually based color blending to reveal the relationship between data subgroups. The resulting visualizations represent the dense regions of each subgroup of the data set as smooth closed shapes and show representative outliers explicitly. We present techniques that leverage the GPU for Splatterplot computation and rendering, enabling interaction with massive data sets. We show how Splatterplots can be an effective alternative to traditional methods of displaying scatter data communicating data trends, outliers, and data set relationships much like traditional scatter plots, but scaling to data sets of higher density and up to millions of points on the screen.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce Splatterplots, a novel presentation of scattered data that enables visualizations that scale beyond standard scatter plots. 
Traditional scatter plots suffer from overdraw (overlapping glyphs) as the number of points per unit area increases. Overdraw obscures outliers, hides data distributions, and makes the relationship among subgroups of the data difficult to discern. To address these issues, Splatterplots abstract away information such that the density of data shown in any unit of screen space is bounded, while allowing continuous zoom to reveal abstracted details. Abstraction automatically groups dense data points into contours and samples remaining points. We combine techniques for abstraction with perceptually based color blending to reveal the relationship between data subgroups. The resulting visualizations represent the dense regions of each subgroup of the data set as smooth closed shapes and show representative outliers explicitly. We present techniques that leverage the GPU for Splatterplot computation and rendering, enabling interaction with massive data sets. We show how Splatterplots can be an effective alternative to traditional methods of displaying scatter data communicating data trends, outliers, and data set relationships much like traditional scatter plots, but scaling to data sets of higher density and up to millions of points on the screen.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce Splatterplots, a novel presentation of scattered data that enables visualizations that scale beyond standard scatter plots. Traditional scatter plots suffer from overdraw (overlapping glyphs) as the number of points per unit area increases. Overdraw obscures outliers, hides data distributions, and makes the relationship among subgroups of the data difficult to discern. To address these issues, Splatterplots abstract away information such that the density of data shown in any unit of screen space is bounded, while allowing continuous zoom to reveal abstracted details. 
Abstraction automatically groups dense data points into contours and samples remaining points. We combine techniques for abstraction with perceptually based color blending to reveal the relationship between data subgroups. The resulting visualizations represent the dense regions of each subgroup of the data set as smooth closed shapes and show representative outliers explicitly. We present techniques that leverage the GPU for Splatterplot computation and rendering, enabling interaction with massive data sets. We show how Splatterplots can be an effective alternative to traditional methods of displaying scatter data communicating data trends, outliers, and data set relationships much like traditional scatter plots, but scaling to data sets of higher density and up to millions of points on the screen.", "title": "Splatterplots: Overcoming Overdraw in Scatter Plots", "normalizedTitle": "Splatterplots: Overcoming Overdraw in Scatter Plots", "fno": "ttg2013091526", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Image Color Analysis", "Data Visualization", "Encoding", "Shape", "Clutter", "Statistical Graphics", "Scalability Issues", "Visual Design", "Perception Theory" ], "authors": [ { "givenName": "A.", "surname": "Mayorga", "fullName": "A. Mayorga", "affiliation": "Dept. of Comput. Sci., Univ. of Wisconsin, Madison, WI, USA", "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Gleicher", "fullName": "M. Gleicher", "affiliation": "Dept. of Comput. Sci., Univ. 
of Wisconsin, Madison, WI, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1526-1538", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/bdva/2015/7343/0/07314294", "title": "Guiding the Exploration of Scatter Plot Data Using Motif-Based Interest Measures", "doi": null, "abstractUrl": "/proceedings-article/bdva/2015/07314294/12OmNwHz03z", "parentPublication": { "id": "proceedings/bdva/2015/7343/0", "title": "2015 Big Data Visual Analytics (BDVA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2017/0852/0/0852a099", "title": "Stem & Leaf Plots Extended for Text Visualizations", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2017/0852a099/12OmNy5zspa", "parentPublication": { "id": "proceedings/cgiv/2017/0852/0", "title": "2017 14th International Conference on Computer Graphics, Imaging and Visualization (CGiV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892b522", "title": "Enhancing Scatter Plots Using Ellipsoid Pixel Placement and Shading", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892b522/12OmNzwpUnq", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192696", "title": "Orientation-Enhanced Parallel Coordinate Plots", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192696/13rRUwkxc5q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010060980", "title": "Matching Visual Saliency to Confidence in Plots of Uncertain Data", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010060980/13rRUxZRbnY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06875982", "title": "Visual Abstraction and Exploration of Multi-class Scatterplots", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875982/13rRUygT7ye", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/06/08667672", "title": "Relaxing Dense Scatter Plots with Pixel-Based Mappings", "doi": null, "abstractUrl": "/journal/tg/2019/06/08667672/18q6mW4N2ZW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08809844", "title": "A Recursive Subdivision Technique for Sampling Multi-class Scatterplots", "doi": null, "abstractUrl": "/journal/tg/2020/01/08809844/1cHEfHRrSOQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600a372", "title": "LP-Explain: Local Pictorial Explanation for Outliers", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600a372/1r54AqoaifC", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a080", 
"title": "Enhancing Scatter-plots with Start-plots for Visualising Multi-dimensional Data", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a080/1rSR9e3cPi8", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091513", "articleId": "13rRUxD9gXH", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091539", "articleId": "13rRUEgarsH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesQU", "name": "ttg2013091526s.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013091526s.mp4", "extension": "mp4", "size": "14.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUEgarsH", "doi": "10.1109/TVCG.2012.177", "abstract": "This paper develops a novel volumetric parameterization and spline construction framework, which is an effective modeling tool for converting surface meshes to volumetric splines. Our new splines are defined upon a novel parametric domain called generalized polycubes (GPCs). A GPC comprises a set of regular cube domains topologically glued together. Compared with conventional polycubes (CPCs), the GPC is much more powerful and flexible and has improved numerical accuracy and computational efficiency when serving as a parametric domain. We design an automatic algorithm to construct the GPC domain while also permitting the user to improve shape abstraction via interactive intervention. We then parameterize the input model on the GPC domain. Finally, we devise a new volumetric spline scheme based on this seamless volumetric parameterization. With a hierarchical fitting scheme, the proposed splines can fit data accurately using reduced number of superfluous control points. Our volumetric modeling scheme has great potential in shape modeling, engineering analysis, and reverse engineering applications.", "abstracts": [ { "abstractType": "Regular", "content": "This paper develops a novel volumetric parameterization and spline construction framework, which is an effective modeling tool for converting surface meshes to volumetric splines. Our new splines are defined upon a novel parametric domain called generalized polycubes (GPCs). A GPC comprises a set of regular cube domains topologically glued together. 
Compared with conventional polycubes (CPCs), the GPC is much more powerful and flexible and has improved numerical accuracy and computational efficiency when serving as a parametric domain. We design an automatic algorithm to construct the GPC domain while also permitting the user to improve shape abstraction via interactive intervention. We then parameterize the input model on the GPC domain. Finally, we devise a new volumetric spline scheme based on this seamless volumetric parameterization. With a hierarchical fitting scheme, the proposed splines can fit data accurately using reduced number of superfluous control points. Our volumetric modeling scheme has great potential in shape modeling, engineering analysis, and reverse engineering applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper develops a novel volumetric parameterization and spline construction framework, which is an effective modeling tool for converting surface meshes to volumetric splines. Our new splines are defined upon a novel parametric domain called generalized polycubes (GPCs). A GPC comprises a set of regular cube domains topologically glued together. Compared with conventional polycubes (CPCs), the GPC is much more powerful and flexible and has improved numerical accuracy and computational efficiency when serving as a parametric domain. We design an automatic algorithm to construct the GPC domain while also permitting the user to improve shape abstraction via interactive intervention. We then parameterize the input model on the GPC domain. Finally, we devise a new volumetric spline scheme based on this seamless volumetric parameterization. With a hierarchical fitting scheme, the proposed splines can fit data accurately using reduced number of superfluous control points. 
Our volumetric modeling scheme has great potential in shape modeling, engineering analysis, and reverse engineering applications.", "title": "Surface Mesh to Volumetric Spline Conversion with Generalized Polycubes", "normalizedTitle": "Surface Mesh to Volumetric Spline Conversion with Generalized Polycubes", "fno": "ttg2013091539", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Splines Mathematics", "Shape", "Solid Modeling", "Computational Modeling", "Solids", "Topology", "Algorithm Design And Analysis", "Volumetric Parameterization", "Volumetric Spline", "Generalized Polycube" ], "authors": [ { "givenName": null, "surname": "Bo Li", "fullName": "Bo Li", "affiliation": "Dept. of Comput. Sci., Stony Brook Univ., Stony Brook, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Xin Li", "fullName": "Xin Li", "affiliation": "Sch. of Electr. Eng. & Comput. Sci., Louisiana State Univ., Baton Rouge, LA, USA", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Kexiang Wang", "fullName": "Kexiang Wang", "affiliation": "Dept. of Comput. Sci., Stony Brook Univ., Stony Brook, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Hong Qin", "fullName": "Hong Qin", "affiliation": "Dept. of Comput. 
Sci., Stony Brook Univ., Stony Brook, NY, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1539-1551", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdh/2016/4400/0/4400a272", "title": "A Balanced Surface Parameterization Method and Its Application to Spline Fitting", "doi": null, "abstractUrl": "/proceedings-article/icdh/2016/4400a272/12OmNBC8Awf", "parentPublication": { "id": "proceedings/icdh/2016/4400/0", "title": "2016 6th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiccsa/2016/4320/0/07945738", "title": "Toward an optimal B-spline wavelet transform for image compression", "doi": null, "abstractUrl": "/proceedings-article/aiccsa/2016/07945738/12OmNwAKCKX", "parentPublication": { "id": "proceedings/aiccsa/2016/4320/0", "title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2013/4796/0/06781890", "title": "Image enhancement using E-spline functions", "doi": null, "abstractUrl": "/proceedings-article/isspit/2013/06781890/12OmNwM6A0m", "parentPublication": { "id": "proceedings/isspit/2013/4796/0", "title": "2013 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smi/2010/7259/0/05521474", "title": "Generalized PolyCube Trivariate Splines", "doi": null, "abstractUrl": "/proceedings-article/smi/2010/05521474/12OmNwsNR9b", "parentPublication": { "id": "proceedings/smi/2010/7259/0", "title": 
"Shape Modeling International (SMI 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smi/2010/7259/0/05521444", "title": "Direct-Product Volumetric Parameterization of Handlebodies via Harmonic Fields", "doi": null, "abstractUrl": "/proceedings-article/smi/2010/05521444/12OmNx965xN", "parentPublication": { "id": "proceedings/smi/2010/7259/0", "title": "Shape Modeling International (SMI 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/07/07226867", "title": "Structured Volume Decomposition via Generalized Sweeping", "doi": null, "abstractUrl": "/journal/tg/2016/07/07226867/13rRUEgarsK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/05/ttg2012050703", "title": "Restricted Trivariate Polycube Splines for Volumetric Data Modeling", "doi": null, "abstractUrl": "/journal/tg/2012/05/ttg2012050703/13rRUIM2VH1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/02/08066313", "title": "Feature-Based Volumetric Terrain Generation and Decoration", "doi": null, "abstractUrl": "/journal/tg/2019/02/08066313/17D45WrVgaB", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10113186", "title": "Patching Non-Uniform Extraordinary Points", "doi": null, "abstractUrl": "/journal/tg/5555/01/10113186/1MNbNVYb4sw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/iv-2/2019/2850/0/285000a147", "title": "Volume Completion for Trimmed B-Reps", "doi": null, "abstractUrl": "/proceedings-article/iv-2/2019/285000a147/1cMEQrpMzQI", "parentPublication": { "id": "proceedings/iv-2/2019/2850/0", "title": "2019 23rd International Conference in Information Visualization – Part II", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091526", "articleId": "13rRUxC0SEh", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091552", "articleId": "13rRUxAAT7E", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxAAT7E", "doi": "10.1109/TVCG.2013.67", "abstract": "Virtual try-on applications have become popular because they allow users to watch themselves wearing different clothes without the effort of changing them physically. This helps users to make quick buying decisions and, thus, improves the sales efficiency of retailers. Previous solutions usually involve motion capture, 3D reconstruction or modeling, which are time consuming and not robust for all body poses. Our method avoids these steps by combining image-based renderings of the user and previously recorded garments. It transfers the appearance of a garment recorded from one user to another by matching input and recorded frames, image-based visual hull rendering, and online registration methods. Using images of real garments allows for a realistic rendering quality with high performance. It is suitable for a wide range of clothes and complex appearances, allows arbitrary viewing angles, and requires only little manual input. Our system is particularly useful for virtual try-on applications as well as interactive games.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual try-on applications have become popular because they allow users to watch themselves wearing different clothes without the effort of changing them physically. This helps users to make quick buying decisions and, thus, improves the sales efficiency of retailers. Previous solutions usually involve motion capture, 3D reconstruction or modeling, which are time consuming and not robust for all body poses. Our method avoids these steps by combining image-based renderings of the user and previously recorded garments. 
It transfers the appearance of a garment recorded from one user to another by matching input and recorded frames, image-based visual hull rendering, and online registration methods. Using images of real garments allows for a realistic rendering quality with high performance. It is suitable for a wide range of clothes and complex appearances, allows arbitrary viewing angles, and requires only little manual input. Our system is particularly useful for virtual try-on applications as well as interactive games.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual try-on applications have become popular because they allow users to watch themselves wearing different clothes without the effort of changing them physically. This helps users to make quick buying decisions and, thus, improves the sales efficiency of retailers. Previous solutions usually involve motion capture, 3D reconstruction or modeling, which are time consuming and not robust for all body poses. Our method avoids these steps by combining image-based renderings of the user and previously recorded garments. It transfers the appearance of a garment recorded from one user to another by matching input and recorded frames, image-based visual hull rendering, and online registration methods. Using images of real garments allows for a realistic rendering quality with high performance. It is suitable for a wide range of clothes and complex appearances, allows arbitrary viewing angles, and requires only little manual input. 
Our system is particularly useful for virtual try-on applications as well as interactive games.", "title": "Virtual Try-On through Image-Based Rendering", "normalizedTitle": "Virtual Try-On through Image-Based Rendering", "fno": "ttg2013091552", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Databases", "Rendering Computer Graphics", "Feature Extraction", "Runtime", "Virtual Try On", "Mixed Reality", "Augmented Reality", "Image Based Rendering" ], "authors": [ { "givenName": "Stefan", "surname": "Hauswiesner", "fullName": "Stefan Hauswiesner", "affiliation": "Graz University of Technology, Graz", "__typename": "ArticleAuthorType" }, { "givenName": "Matthias", "surname": "Straka", "fullName": "Matthias Straka", "affiliation": "Graz University of Technology, Graz", "__typename": "ArticleAuthorType" }, { "givenName": "Gerhard", "surname": "Reitmayr", "fullName": "Gerhard Reitmayr", "affiliation": "Graz University of Technology, Graz", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1552-1565", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/Ismar-mashd/2015/9628/0/9628a059", "title": "Augmented Reality Tool for Markerless Virtual Try-on around Human Arm", "doi": null, "abstractUrl": "/proceedings-article/Ismar-mashd/2015/9628a059/12OmNBtCCIY", "parentPublication": { "id": "proceedings/Ismar-mashd/2015/9628/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2021/0126/0/09669371", "title": "Cascaded Cross-Domain Fusion of Virtual Try-On", "doi": null, "abstractUrl": 
"/proceedings-article/bibm/2021/09669371/1A9WqqJlIZy", "parentPublication": { "id": "proceedings/bibm/2021/0126/0", "title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200f413", "title": "ZFlow: Gated Appearance Flow-based Virtual Try-on with 3D Priors", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f413/1BmIaz08Ave", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200n3789", "title": "FashionMirror: Co-attention Feature-remapping Virtual Try-on with Sequential Template Poses", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200n3789/1BmIvggmR8I", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600d450", "title": "Full-Range Virtual Try-On with Recurrent Tri-Level Transform", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600d450/1H1iYjiJJsI", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j025", "title": "Towards Multi-Pose Guided Virtual Try-On Network", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j025/1hVl6EovMju", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b161", "title": "FW-GAN: Flow-Navigated Warping GAN for Video Virtual Try-On", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b161/1hVlhabuOgo", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h847", "title": "Towards Photo-Realistic Virtual Try-On by Adaptively Generating↔Preserving Image Content", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h847/1m3nGrL41va", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f183", "title": "Image Based Virtual Try-On Network From Unpaired Data", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f183/1m3o1BjvjZm", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900q6923", "title": "Disentangled Cycle Consistency for Highly-realistic Virtual Try-On", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900q6923/1yeLBaa0gzC", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091539", "articleId": "13rRUEgarsH", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091566", "articleId": "13rRUwInvsP", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgzW", "name": "ttg2013091552s.avi", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013091552s.avi", "extension": "avi", "size": "42.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwInvsP", "doi": "10.1109/TVCG.2013.21", "abstract": "This paper proposes the VisibilityCluster algorithm for efficient visibility approximation and representation in many-light rendering. By carefully clustering lights and shading points, we can construct a visibility matrix that exhibits good local structures due to visibility coherence of nearby lights and shading points. Average visibility can be efficiently estimated by exploiting the sparse structure of the matrix and shooting only few shadow rays between clusters. Moreover, we can use the estimated average visibility as a quality measure for visibility estimation, enabling us to locally refine VisibilityClusters with large visibility variance for improving accuracy. We demonstrate that, with the proposed method, visibility can be incorporated into importance sampling at a reasonable cost for the many-light problem, significantly reducing variance in Monte Carlo rendering. In addition, the proposed method can be used to increase realism of local shading by adding directional occlusion effects. Experiments show that the proposed technique outperforms state-of-the-art importance sampling algorithms, and successfully enhances the preview quality for lighting design.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes the VisibilityCluster algorithm for efficient visibility approximation and representation in many-light rendering. By carefully clustering lights and shading points, we can construct a visibility matrix that exhibits good local structures due to visibility coherence of nearby lights and shading points. 
Average visibility can be efficiently estimated by exploiting the sparse structure of the matrix and shooting only few shadow rays between clusters. Moreover, we can use the estimated average visibility as a quality measure for visibility estimation, enabling us to locally refine VisibilityClusters with large visibility variance for improving accuracy. We demonstrate that, with the proposed method, visibility can be incorporated into importance sampling at a reasonable cost for the many-light problem, significantly reducing variance in Monte Carlo rendering. In addition, the proposed method can be used to increase realism of local shading by adding directional occlusion effects. Experiments show that the proposed technique outperforms state-of-the-art importance sampling algorithms, and successfully enhances the preview quality for lighting design.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes the VisibilityCluster algorithm for efficient visibility approximation and representation in many-light rendering. By carefully clustering lights and shading points, we can construct a visibility matrix that exhibits good local structures due to visibility coherence of nearby lights and shading points. Average visibility can be efficiently estimated by exploiting the sparse structure of the matrix and shooting only few shadow rays between clusters. Moreover, we can use the estimated average visibility as a quality measure for visibility estimation, enabling us to locally refine VisibilityClusters with large visibility variance for improving accuracy. We demonstrate that, with the proposed method, visibility can be incorporated into importance sampling at a reasonable cost for the many-light problem, significantly reducing variance in Monte Carlo rendering. In addition, the proposed method can be used to increase realism of local shading by adding directional occlusion effects. 
Experiments show that the proposed technique outperforms state-of-the-art importance sampling algorithms, and successfully enhances the preview quality for lighting design.", "title": "VisibilityCluster: Average Directional Visibility for Many-Light Rendering", "normalizedTitle": "VisibilityCluster: Average Directional Visibility for Many-Light Rendering", "fno": "ttg2013091566", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Monte Carlo Methods", "Lighting", "Rendering Computer Graphics", "Approximation Methods", "Approximation Algorithms", "Coherence", "Geometry", "The Many Light Problem", "Ray Tracing", "Visibility Approximation", "Importance Sampling" ], "authors": [ { "givenName": null, "surname": "Yu-Ting Wu", "fullName": "Yu-Ting Wu", "affiliation": "Dept. of Comput. Sci. & Inf. Eng., Nat. Taiwan Univ., Taipei, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Yung-Yu Chuang", "fullName": "Yung-Yu Chuang", "affiliation": "Dept. of Comput. Sci. & Inf. Eng., Nat. 
Taiwan Univ., Taipei, Taiwan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1566-1578", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cw/2016/2303/0/2303a057", "title": "Interactive Screenspace Stream-Compaction Fragment Rendering of Direct Illumination from Area Lights", "doi": null, "abstractUrl": "/proceedings-article/cw/2016/2303a057/12OmNCdk2W8", "parentPublication": { "id": "proceedings/cw/2016/2303/0", "title": "2016 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2001/1330/0/13300138", "title": "A Shading Model for Image-Based Rendering", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2001/13300138/12OmNvJXeCI", "parentPublication": { "id": "proceedings/sibgrapi/2001/1330/0", "title": "Proceedings XIV Brazilian Symposium on Computer Graphics and Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacific-graphics/2010/4205/0/4205a054", "title": "Thread-Based BRDF Rendering on GPU", "doi": null, "abstractUrl": "/proceedings-article/pacific-graphics/2010/4205a054/12OmNweBUCO", "parentPublication": { "id": "proceedings/pacific-graphics/2010/4205/0", "title": "Pacific Conference on Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011856", "title": "Real-time rendering with complex natural illumination", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011856/12OmNweTvQm", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference 
on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761661", "title": "A layered method of visibility resolving in depth image-based rendering", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761661/12OmNy5hRhC", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/03/ttg2012030447", "title": "Efficient Visibility Encoding for Dynamic Illumination in Direct Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2012/03/ttg2012030447/13rRUxAATgu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/09/07562447", "title": "Efficient Stochastic Rendering of Static and Animated Volumes Using Visibility Sweeps", "doi": null, "abstractUrl": "/journal/tg/2017/09/07562447/13rRUxly8T2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pvg/2003/2091/0/01249035", "title": "Visibility-based prefetching for interactive out-of-core rendering", "doi": null, "abstractUrl": "/proceedings-article/pvg/2003/01249035/1h0S13AbcoU", "parentPublication": { "id": "proceedings/pvg/2003/2091/0", "title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/10/09113332", "title": "Stochastic Lightcuts for Sampling Many Lights", "doi": null, "abstractUrl": "/journal/tg/2021/10/09113332/1kxX2rlqpDa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on 
Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/10/09380921", "title": "Adaptive Irradiance Sampling for Many-Light Rendering of Subsurface Scattering", "doi": null, "abstractUrl": "/journal/tg/2022/10/09380921/1s2GgiOWWt2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091552", "articleId": "13rRUxAAT7E", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091579", "articleId": "13rRUxZzAhG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgDW", "name": "ttg2013091566s.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013091566s.pdf", "extension": "pdf", "size": "10.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxZzAhG", "doi": "10.1109/TVCG.2013.18", "abstract": "Characterizing the interplay between the vortices and forces acting on a wind turbine's blades in a qualitative and quantitative way holds the potential for significantly improving large wind turbine design. This paper introduces an integrated pipeline for highly effective wind and force field analysis and visualization. We extract vortices induced by a turbine's rotation in a wind field, and characterize vortices in conjunction with numerically simulated forces on the blade surfaces as these vortices strike another turbine's blades downstream. The scientifically relevant issue to be studied is the relationship between the extracted, approximate locations on the blades where vortices strike the blades and the forces that exist in those locations. This integrated approach is used to detect and analyze turbulent flow that causes local impact on the wind turbine blade structure. The results that we present are based on analyzing the wind and force field data sets generated by numerical simulations, and allow domain scientists to relate vortex-blade interactions with power output loss in turbines and turbine life expectancy. Our methods have the potential to improve turbine design to save costs related to turbine operation and maintenance.", "abstracts": [ { "abstractType": "Regular", "content": "Characterizing the interplay between the vortices and forces acting on a wind turbine's blades in a qualitative and quantitative way holds the potential for significantly improving large wind turbine design. This paper introduces an integrated pipeline for highly effective wind and force field analysis and visualization. 
We extract vortices induced by a turbine's rotation in a wind field, and characterize vortices in conjunction with numerically simulated forces on the blade surfaces as these vortices strike another turbine's blades downstream. The scientifically relevant issue to be studied is the relationship between the extracted, approximate locations on the blades where vortices strike the blades and the forces that exist in those locations. This integrated approach is used to detect and analyze turbulent flow that causes local impact on the wind turbine blade structure. The results that we present are based on analyzing the wind and force field data sets generated by numerical simulations, and allow domain scientists to relate vortex-blade interactions with power output loss in turbines and turbine life expectancy. Our methods have the potential to improve turbine design to save costs related to turbine operation and maintenance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Characterizing the interplay between the vortices and forces acting on a wind turbine's blades in a qualitative and quantitative way holds the potential for significantly improving large wind turbine design. This paper introduces an integrated pipeline for highly effective wind and force field analysis and visualization. We extract vortices induced by a turbine's rotation in a wind field, and characterize vortices in conjunction with numerically simulated forces on the blade surfaces as these vortices strike another turbine's blades downstream. The scientifically relevant issue to be studied is the relationship between the extracted, approximate locations on the blades where vortices strike the blades and the forces that exist in those locations. This integrated approach is used to detect and analyze turbulent flow that causes local impact on the wind turbine blade structure. 
The results that we present are based on analyzing the wind and force field data sets generated by numerical simulations, and allow domain scientists to relate vortex-blade interactions with power output loss in turbines and turbine life expectancy. Our methods have the potential to improve turbine design to save costs related to turbine operation and maintenance.", "title": "Visualization and Analysis of Vortex-Turbine Intersections in Wind Farms", "normalizedTitle": "Visualization and Analysis of Vortex-Turbine Intersections in Wind Farms", "fno": "ttg2013091579", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Blades", "Wind Turbines", "Data Visualization", "Feature Extraction", "Force", "Geometry", "Vortices", "Flow Visualization", "Applications", "Wind Energy", "Turbulence" ], "authors": [ { "givenName": "S.", "surname": "Shafii", "fullName": "S. Shafii", "affiliation": "Dept. of Comput. Sci., Univ. of California, Davis, Davis, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "H.", "surname": "Obermaier", "fullName": "H. Obermaier", "affiliation": "Dept. of Comput. Sci., Univ. of California, Davis, Davis, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "R.", "surname": "Linn", "fullName": "R. Linn", "affiliation": "Comput. Earth Sci. Group (EES-16), Los Alamos Nat. Lab., Los Alamos, NM, USA", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Eunmo Koo", "fullName": "Eunmo Koo", "affiliation": "Comput. Earth Sci. Group (EES-16), Los Alamos Nat. Lab., Los Alamos, NM, USA", "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Hlawitschka", "fullName": "M. Hlawitschka", "affiliation": "Deutschland, Univ. Leipzig, Leipzig, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "C.", "surname": "Garth", "fullName": "C. Garth", "affiliation": "Fachbereich Inf., Tech. Univ. 
Kaiserslautern, Kaiserslautern, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "B.", "surname": "Hamann", "fullName": "B. Hamann", "affiliation": "Dept. of Comput. Sci., Univ. of California, Davis, Davis, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "K. I.", "surname": "Joy", "fullName": "K. I. Joy", "affiliation": "Dept. of Comput. Sci., Univ. of California, Davis, Davis, CA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1579-1591", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/aqtr/2018/2205/0/08402770", "title": "Analysis of a new horizontal axes wind turbine with 6/3 blades", "doi": null, "abstractUrl": "/proceedings-article/aqtr/2018/08402770/12OmNBl6ENb", "parentPublication": { "id": "proceedings/aqtr/2018/2205/0", "title": "2018 IEEE International Conference on Automation, Quality and Testing, Robotics (AQTR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2011/4455/0/4455a113", "title": "A Status of Study on Icing of Wind Turbine Blades", "doi": null, "abstractUrl": "/proceedings-article/icdma/2011/4455a113/12OmNrY3LzX", "parentPublication": { "id": "proceedings/icdma/2011/4455/0", "title": "2011 Second International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2017/3013/0/3013a488", "title": "Numerical Study of Resistance Type Vertical Axis Wind Turbine with Soft Blades", "doi": null, "abstractUrl": "/proceedings-article/icisce/2017/3013a488/12OmNvqEvPA", "parentPublication": { "id": "proceedings/icisce/2017/3013/0", "title": "2017 4th International Conference on 
Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2015/7143/0/7143a973", "title": "Shape Design and Aerodynamic Characteristics of Wind Turbine Blades Based on Energy Cost", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2015/7143a973/12OmNwdtw95", "parentPublication": { "id": "proceedings/icmtma/2015/7143/0", "title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/issst/2010/7094/0/05507724", "title": "Comparative life cycle assessment: Reinforcing wind turbine blades with carbon nanofibers", "doi": null, "abstractUrl": "/proceedings-article/issst/2010/05507724/12OmNxecS74", "parentPublication": { "id": "proceedings/issst/2010/7094/0", "title": "IEEE International Symposium on Sustainable Systems and Technology (ISSST 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/uic-atc-scalcom/2015/7211/0/07518327", "title": "Review of Pitch Control for Variable Speed Wind Turbine", "doi": null, "abstractUrl": "/proceedings-article/uic-atc-scalcom/2015/07518327/12OmNySG3Pu", "parentPublication": { "id": "proceedings/uic-atc-scalcom/2015/7211/0", "title": "2015 IEEE 12th Intl Conf on Ubiquitous Intelligence and Computing and 2015 IEEE 12th Intl Conf on Autonomic and Trusted Computing and 2015 IEEE 15th Intl Conf on Scalable Computing and Communications and Its Associated Workshops (UIC-ATC-ScalCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csde/2021/9552/0/09718406", "title": "Modelling Analysis of Savonius and Darrieus Wind Turbine Models Using SolidWorks Flow Simulation", "doi": null, "abstractUrl": "/proceedings-article/csde/2021/09718406/1Boh30djBAY", "parentPublication": { "id": 
"proceedings/csde/2021/9552/0", "title": "2021 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiam/2021/1732/0/173200a597", "title": "Influence of Blade Tip Structure Change on Aerodynamic Noise of Wind Turbine under Yaw", "doi": null, "abstractUrl": "/proceedings-article/aiam/2021/173200a597/1BzTMRqAxBC", "parentPublication": { "id": "proceedings/aiam/2021/1732/0", "title": "2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcce/2019/4689/0/468900a026", "title": "Wind Turbine Blade Icing Prediction Based on Deep Belief Network", "doi": null, "abstractUrl": "/proceedings-article/icmcce/2019/468900a026/1h0Fbf2afra", "parentPublication": { "id": "proceedings/icmcce/2019/4689/0", "title": "2019 4th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitbs/2021/4854/0/485400a278", "title": "Effect of Air Density on the Fatigue Life of Wind Turbine Blades", "doi": null, "abstractUrl": "/proceedings-article/icitbs/2021/485400a278/1wB6Qza95ZK", "parentPublication": { "id": "proceedings/icitbs/2021/4854/0", "title": "2021 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091566", "articleId": "13rRUwInvsP", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013091592", "articleId": "13rRUNvyatj", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvyatj", "doi": "10.1109/TVCG.2013.8", "abstract": "This paper presents a simple, three stage method to simulate the mechanics of wetting of porous solid objects, like sponges and cloth, when they interact with a fluid. In the first stage, we model the absorption of fluid by the object when it comes in contact with the fluid. In the second stage, we model the transport of absorbed fluid inside the object, due to diffusion, as a flow in a deforming, unstructured mesh. The fluid diffuses within the object depending on saturation of its various parts and other body forces. Finally, in the third stage, oversaturated parts of the object shed extra fluid by dripping. The simulation model is motivated by the physics of imbibition of fluids into porous solids in the presence of gravity. It is phenomenologically capable of simulating wicking and imbibition, dripping, surface flows over wet media, material weakening, and volume expansion due to wetting. The model is inherently mass conserving and works for both thin 2D objects like cloth and for 3D volumetric objects like sponges. It is also designed to be computationally efficient and can be easily added to existing cloth, soft body, and fluid simulation pipelines.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a simple, three stage method to simulate the mechanics of wetting of porous solid objects, like sponges and cloth, when they interact with a fluid. In the first stage, we model the absorption of fluid by the object when it comes in contact with the fluid. 
In the second stage, we model the transport of absorbed fluid inside the object, due to diffusion, as a flow in a deforming, unstructured mesh. The fluid diffuses within the object depending on saturation of its various parts and other body forces. Finally, in the third stage, oversaturated parts of the object shed extra fluid by dripping. The simulation model is motivated by the physics of imbibition of fluids into porous solids in the presence of gravity. It is phenomenologically capable of simulating wicking and imbibition, dripping, surface flows over wet media, material weakening, and volume expansion due to wetting. The model is inherently mass conserving and works for both thin 2D objects like cloth and for 3D volumetric objects like sponges. It is also designed to be computationally efficient and can be easily added to existing cloth, soft body, and fluid simulation pipelines.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a simple, three stage method to simulate the mechanics of wetting of porous solid objects, like sponges and cloth, when they interact with a fluid. In the first stage, we model the absorption of fluid by the object when it comes in contact with the fluid. In the second stage, we model the transport of absorbed fluid inside the object, due to diffusion, as a flow in a deforming, unstructured mesh. The fluid diffuses within the object depending on saturation of its various parts and other body forces. Finally, in the third stage, oversaturated parts of the object shed extra fluid by dripping. The simulation model is motivated by the physics of imbibition of fluids into porous solids in the presence of gravity. It is phenomenologically capable of simulating wicking and imbibition, dripping, surface flows over wet media, material weakening, and volume expansion due to wetting. The model is inherently mass conserving and works for both thin 2D objects like cloth and for 3D volumetric objects like sponges. 
It is also designed to be computationally efficient and can be easily added to existing cloth, soft body, and fluid simulation pipelines.", "title": "Wetting of Porous Solids", "normalizedTitle": "Wetting of Porous Solids", "fno": "ttg2013091592", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Solids", "Solid Modeling", "Computational Modeling", "Mathematical Model", "Gravity", "Absorption", "Deformable Models", "Flow Through Porous Solids", "Mechanics Of Wetting" ], "authors": [ { "givenName": "S.", "surname": "Patkar", "fullName": "S. Patkar", "affiliation": "Dept. of Comput. Sci., Stanford Univ., Stanford, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "P.", "surname": "Chaudhuri", "fullName": "P. Chaudhuri", "affiliation": "Dept. of Comput. Sci. & Eng., Indian Inst. of Technol. Bombay, Mumbai, India", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1592-1604", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cgames/2013/0820/0/06632624", "title": "Real-time rendering of melting objects in video games", "doi": null, "abstractUrl": "/proceedings-article/cgames/2013/06632624/12OmNCwCLmW", "parentPublication": { "id": "proceedings/cgames/2013/0820/0", "title": "2013 18th International Conference on Computer Games: AI, Animation, Mobile, Interactive Multimedia, Educational & Serious Games (CGAMES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hipcw/2015/8717/0/8717a002", "title": "On the Navier-Slip Boundary Condition for Computations of Impinging Droplets", "doi": null, "abstractUrl": "/proceedings-article/hipcw/2015/8717a002/12OmNqNXEqa", "parentPublication": { "id": "proceedings/hipcw/2015/8717/0", "title": "2015 
IEEE 22nd International Conference on High Performance Computing Workshops (HiPCW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2014/6854/0/6854a327", "title": "Simulation of Cloth with Low Stretch", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2014/6854a327/12OmNrY3Lx1", "parentPublication": { "id": "proceedings/icvrv/2014/6854/0", "title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iqec/2005/9240/0/01561175", "title": "Formation of multiple filaments in transparent solids", "doi": null, "abstractUrl": "/proceedings-article/iqec/2005/01561175/12OmNvlg8hP", "parentPublication": { "id": "proceedings/iqec/2005/9240/0", "title": "International Quantum Electronics Conference, 2005.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hipcw/2016/5773/0/07837061", "title": "Molecular Dynamics Simulation of Nanoscopic Couette Flow and Lubricated Nanoindentation", "doi": null, "abstractUrl": "/proceedings-article/hipcw/2016/07837061/12OmNxXCGI0", "parentPublication": { "id": "proceedings/hipcw/2016/5773/0", "title": "2016 IEEE 23rd International Conference on High-Performance Computing: Workshops (HiPCW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmap/2000/0562/0/00838239", "title": "Effect of fabric properties on cloth draping modeling", "doi": null, "abstractUrl": "/proceedings-article/gmap/2000/00838239/12OmNxzuMKD", "parentPublication": { "id": "proceedings/gmap/2000/0562/0", "title": "Proceedings Geometric Modeling and Processing 2000. 
Theory and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/06/mcg2017060052", "title": "Cloth Animation Retrieval Using a Motion-Shape Signature", "doi": null, "abstractUrl": "/magazine/cg/2017/06/mcg2017060052/13rRUyoPSRD", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icris/2019/2632/0/263200a436", "title": "Optimization of Fabric Deformation Based on Wind Field Simulation Based by Bernoulli Equation", "doi": null, "abstractUrl": "/proceedings-article/icris/2019/263200a436/1cI6oc9vSta", "parentPublication": { "id": "proceedings/icris/2019/2632/0", "title": "2019 International Conference on Robots & Intelligent System (ICRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/07/09266101", "title": "Computational Design of Self-Actuated Deformable Solids via Shape Memory Material", "doi": null, "abstractUrl": "/journal/tg/2022/07/09266101/1oZxGnBQoZa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifeea/2020/9627/0/962700a816", "title": "Theoretical Research of Wetting Transition from Cassie State to Wenzel State", "doi": null, "abstractUrl": "/proceedings-article/ifeea/2020/962700a816/1rvCz46IpdS", "parentPublication": { "id": "proceedings/ifeea/2020/9627/0", "title": "2020 7th International Forum on Electrical Engineering and Automation (IFEEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013091579", "articleId": "13rRUxZzAhG", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRHD", 
"name": "ttg2013091592s2.avi", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013091592s2.avi", "extension": "avi", "size": "10.5 MB", "__typename": "WebExtraType" }, { "id": "17ShDTXWRHC", "name": "ttg2013091592s1.avi", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013091592s1.avi", "extension": "avi", "size": "27.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "18NkfcOzr9u", "doi": "10.1109/TVCG.2019.2908363", "abstract": "Image colorization refers to a computer-assisted process that adds colors to grayscale images. It is a challenging task since there is usually no one-to-one correspondence between color and local texture. In this paper, we tackle this issue by exploiting weighted nonlocal self-similarity and local consistency constraints at the resolution of superpixels. Given a grayscale target image, we first select a color source image containing similar segments to target image and extract multi-level features of each superpixel in both images after superpixel segmentation. Then a set of color candidates for each target superpixel is selected by adopting a top-down feature matching scheme with confidence assignment. Finally, we propose a variational approach to determine the most appropriate color for each target superpixel from color candidates. Experiments demonstrate the effectiveness of the proposed method and show its superiority to other state-of-the-art methods. Furthermore, our method can be easily extended to color transfer between two color images.", "abstracts": [ { "abstractType": "Regular", "content": "Image colorization refers to a computer-assisted process that adds colors to grayscale images. It is a challenging task since there is usually no one-to-one correspondence between color and local texture. In this paper, we tackle this issue by exploiting weighted nonlocal self-similarity and local consistency constraints at the resolution of superpixels. 
Given a grayscale target image, we first select a color source image containing similar segments to target image and extract multi-level features of each superpixel in both images after superpixel segmentation. Then a set of color candidates for each target superpixel is selected by adopting a top-down feature matching scheme with confidence assignment. Finally, we propose a variational approach to determine the most appropriate color for each target superpixel from color candidates. Experiments demonstrate the effectiveness of the proposed method and show its superiority to other state-of-the-art methods. Furthermore, our method can be easily extended to color transfer between two color images.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Image colorization refers to a computer-assisted process that adds colors to grayscale images. It is a challenging task since there is usually no one-to-one correspondence between color and local texture. In this paper, we tackle this issue by exploiting weighted nonlocal self-similarity and local consistency constraints at the resolution of superpixels. Given a grayscale target image, we first select a color source image containing similar segments to target image and extract multi-level features of each superpixel in both images after superpixel segmentation. Then a set of color candidates for each target superpixel is selected by adopting a top-down feature matching scheme with confidence assignment. Finally, we propose a variational approach to determine the most appropriate color for each target superpixel from color candidates. Experiments demonstrate the effectiveness of the proposed method and show its superiority to other state-of-the-art methods. 
Furthermore, our method can be easily extended to color transfer between two color images.", "title": "A Superpixel-Based Variational Model for Image Colorization", "normalizedTitle": "A Superpixel-Based Variational Model for Image Colorization", "fno": "08676327", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Feature Extraction", "Image Colour Analysis", "Image Matching", "Image Segmentation", "Image Texture", "Superpixel Based Variational Model", "Image Colorization", "Computer Assisted Process", "Local Texture", "Weighted Nonlocal Self Similarity", "Grayscale Target Image", "Color Source Image", "Superpixel Segmentation", "Local Consistency Constraints", "Top Down Feature Matching Scheme", "Image Color Analysis", "Feature Extraction", "Image Segmentation", "Gray Scale", "Histograms", "Color", "Image Edge Detection", "Example Based Image Colorization", "Superpixel Segmentation", "Variational Model", "ADMM" ], "authors": [ { "givenName": "Faming", "surname": "Fang", "fullName": "Faming Fang", "affiliation": "Shanghai Key Laboratory of Multidimensional Information Processing, and Department of Computer Science & Technology, East China Normal University, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Tingting", "surname": "Wang", "fullName": "Tingting Wang", "affiliation": "Shanghai Key Laboratory of Multidimensional Information Processing, and Department of Computer Science & Technology, East China Normal University, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Tieyong", "surname": "Zeng", "fullName": "Tieyong Zeng", "affiliation": "Department of Mathematics, The Chinese University of Hong Kong, Shatin, NT, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Guixu", "surname": "Zhang", "fullName": "Guixu Zhang", "affiliation": "Shanghai Key Laboratory of Multidimensional Information Processing, and Department of Computer Science & Technology, East China Normal University, Shanghai, China", 
"__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "2931-2943", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2015/8391/0/8391a415", "title": "Deep Colorization", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a415/12OmNBNM93v", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460810", "title": "Patch-based image colorization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460810/12OmNBigFy1", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2012/4829/0/4829a032", "title": "Colorization by Multidimensional Projection", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2012/4829a032/12OmNBsLPdX", "parentPublication": { "id": "proceedings/sibgrapi/2012/4829/0", "title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2017/3586/1/3586a929", "title": "Scene Text Detection with Novel Superpixel Based Character Candidate Extraction", "doi": null, "abstractUrl": "/proceedings-article/icdar/2017/3586a929/12OmNxA3YRm", "parentPublication": { "id": "proceedings/icdar/2017/3586/1", "title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/iccea/2010/3982/2/3982b196", "title": "Color Image to Grayscale Image Conversion", "doi": null, "abstractUrl": "/proceedings-article/iccea/2010/3982b196/12OmNzV70CO", "parentPublication": { "id": "proceedings/iccea/2010/3982/2", "title": "Computer Engineering and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093389", "title": "ChromaGAN: Adversarial Picture Colorization with Semantic Class Distribution", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093389/1jPbfLAnmEg", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h965", "title": "Instance-Aware Image Colorization", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h965/1m3nNZyhYXe", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/03/09186041", "title": "Interactive Deep Colorization and its Application for Image Compression", "doi": null, "abstractUrl": "/journal/tg/2022/03/09186041/1mP2JjLRhDy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2020/8138/0/813800a435", "title": "Cartoon image colorization based on emotion recognition and superpixel color resolution", "doi": null, "abstractUrl": "/proceedings-article/iccst/2020/813800a435/1p1gtwbDSH6", "parentPublication": { "id": "proceedings/iccst/2020/8138/0", "title": "2020 International 
Conference on Culture-oriented Science & Technology (ICCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2020/9234/0/923400a001", "title": "Automatic Image Colorization via Weighted Sparse Representation Learning", "doi": null, "abstractUrl": "/proceedings-article/icdh/2020/923400a001/1uGXZvzfk4w", "parentPublication": { "id": "proceedings/icdh/2020/9234/0", "title": "2020 8th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "08673661", "articleId": "18LF7Q1L3na", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "18LF7Q1L3na", "doi": "10.1109/TVCG.2019.2906900", "abstract": "The visualization of evolutionary influence graphs is important for performing many real-life tasks such as citation analysis and social influence analysis. The main challenges include how to summarize large-scale, complex, and time-evolving influence graphs, and how to design effective visual metaphors and dynamic representation methods to illustrate influence patterns over time. In this work, we present Eiffel, an integrated visual analytics system that applies triple summarizations on evolutionary influence graphs in the nodal, relational, and temporal dimensions. In numerical experiments, Eiffel summarization results outperformed those of traditional clustering algorithms with respect to the influence-flow-based objective. Moreover, a flow map representation is proposed and adapted to the case of influence graph summarization, which supports two modes of evolutionary visualization (i.e., flip-book and movie) to expedite the analysis of influence graph dynamics. We conducted two controlled user experiments to evaluate our technique on influence graph summarization and visualization respectively. We also showcased the system in the evolutionary influence analysis of two typical scenarios, the citation influence of scientific papers and the social influence of emerging online events. The evaluation results demonstrate the value of Eiffel in the visual analysis of evolutionary influence graphs.", "abstracts": [ { "abstractType": "Regular", "content": "The visualization of evolutionary influence graphs is important for performing many real-life tasks such as citation analysis and social influence analysis. 
The main challenges include how to summarize large-scale, complex, and time-evolving influence graphs, and how to design effective visual metaphors and dynamic representation methods to illustrate influence patterns over time. In this work, we present Eiffel, an integrated visual analytics system that applies triple summarizations on evolutionary influence graphs in the nodal, relational, and temporal dimensions. In numerical experiments, Eiffel summarization results outperformed those of traditional clustering algorithms with respect to the influence-flow-based objective. Moreover, a flow map representation is proposed and adapted to the case of influence graph summarization, which supports two modes of evolutionary visualization (i.e., flip-book and movie) to expedite the analysis of influence graph dynamics. We conducted two controlled user experiments to evaluate our technique on influence graph summarization and visualization respectively. We also showcased the system in the evolutionary influence analysis of two typical scenarios, the citation influence of scientific papers and the social influence of emerging online events. The evaluation results demonstrate the value of Eiffel in the visual analysis of evolutionary influence graphs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The visualization of evolutionary influence graphs is important for performing many real-life tasks such as citation analysis and social influence analysis. The main challenges include how to summarize large-scale, complex, and time-evolving influence graphs, and how to design effective visual metaphors and dynamic representation methods to illustrate influence patterns over time. In this work, we present Eiffel, an integrated visual analytics system that applies triple summarizations on evolutionary influence graphs in the nodal, relational, and temporal dimensions. 
In numerical experiments, Eiffel summarization results outperformed those of traditional clustering algorithms with respect to the influence-flow-based objective. Moreover, a flow map representation is proposed and adapted to the case of influence graph summarization, which supports two modes of evolutionary visualization (i.e., flip-book and movie) to expedite the analysis of influence graph dynamics. We conducted two controlled user experiments to evaluate our technique on influence graph summarization and visualization respectively. We also showcased the system in the evolutionary influence analysis of two typical scenarios, the citation influence of scientific papers and the social influence of emerging online events. The evaluation results demonstrate the value of Eiffel in the visual analysis of evolutionary influence graphs.", "title": "Eiffel: Evolutionary Flow Map for Influence Graph Visualization", "normalizedTitle": "Eiffel: Evolutionary Flow Map for Influence Graph Visualization", "fno": "08673661", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Citation Analysis", "Data Visualisation", "Evolutionary Computation", "Graph Theory", "Pattern Clustering", "Influence Flow Based Objective", "Flow Map Representation", "Influence Graph Summarization", "Evolutionary Visualization", "Influence Graph Dynamics", "Evolutionary Influence Analysis", "Citation Influence", "Eiffel Summarization Results", "Integrated Visual Analytics System", "Influence Patterns", "Effective Visual Metaphors", "Time Evolving Influence Graphs", "Social Influence Analysis", "Influence Graph Visualization", "Evolutionary Flow Map", "Evolutionary Influence Graphs", "Visual Analysis", "Visualization", "Task Analysis", "Data Visualization", "Citation Analysis", "Layout", "Twitter", "Clutter", "Influence Graph", "Dynamic Visualization", "Citation Analysis" ], "authors": [ { "givenName": "Yucheng", "surname": "Huang", "fullName": "Yucheng Huang", "affiliation": "State Key Laboratory of 
Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lei", "surname": "Shi", "fullName": "Lei Shi", "affiliation": "Beijing Advanced Innovation Center for Big Data and Brain Computing, School of Computer Science and Engineering, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yue", "surname": "Su", "fullName": "Yue Su", "affiliation": "State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences and UCAS, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yifan", "surname": "Hu", "fullName": "Yifan Hu", "affiliation": "Yahoo Labs, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Hanghang", "surname": "Tong", "fullName": "Hanghang Tong", "affiliation": "School of Computing, Informatics, Decision Systems Engineering, Arizona State University, Tempe, AZ, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Chaoli", "surname": "Wang", "fullName": "Chaoli Wang", "affiliation": "Department of Computer Science & Engineering, University of Notre Dame, Notre Dame, IN, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Tong", "surname": "Yang", "fullName": "Tong Yang", "affiliation": "Department of Computer Science, Peking University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Deyun", "surname": "Wang", "fullName": "Deyun Wang", "affiliation": "State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences and UCAS, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Shuo", "surname": "Liang", "fullName": "Shuo Liang", "affiliation": "Academy of Arts & Design, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", 
"pubType": "trans", "pages": "2944-2960", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icde/2016/2020/0/07498314", "title": "TOPIC: Toward perfect Influence Graph Summarization", "doi": null, "abstractUrl": "/proceedings-article/icde/2016/07498314/12OmNAoUTf2", "parentPublication": { "id": "proceedings/icde/2016/2020/0", "title": "2016 IEEE 32nd International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2016/5661/0/07883510", "title": "D-Map: Visual analysis of ego-centric information diffusion patterns in social media", "doi": null, "abstractUrl": "/proceedings-article/vast/2016/07883510/12OmNBKmXoU", "parentPublication": { "id": "proceedings/vast/2016/5661/0", "title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asonam/2016/2846/0/07752390", "title": "Evolutionary algorithm for seed selection in social influence process", "doi": null, "abstractUrl": "/proceedings-article/asonam/2016/07752390/12OmNCmGNMU", "parentPublication": { "id": "proceedings/asonam/2016/2846/0", "title": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2013/3211/0/3211a491", "title": "Ranking Twitter Influence by Combining Network Centrality and Influence Observables in an Evolutionary Model", "doi": null, "abstractUrl": "/proceedings-article/sitis/2013/3211a491/12OmNqFrGF1", "parentPublication": { "id": "proceedings/sitis/2013/3211/0", "title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/snpd/2017/5504/0/08022712", "title": "The influence of Twitter on education policy making", "doi": null, "abstractUrl": "/proceedings-article/snpd/2017/08022712/12OmNrYlmVk", "parentPublication": { "id": "proceedings/snpd/2017/5504/0", "title": "2017 18th IEEE/ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2014/4302/0/4302a983", "title": "Flow-Based Influence Graph Visual Summarization", "doi": null, "abstractUrl": "/proceedings-article/icdm/2014/4302a983/12OmNvKePHD", "parentPublication": { "id": "proceedings/icdm/2014/4302/0", "title": "2014 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2015/9926/0/07364014", "title": "Discovering time-evolving influence from dynamic heterogeneous graphs", "doi": null, "abstractUrl": "/proceedings-article/big-data/2015/07364014/12OmNxGja0x", "parentPublication": { "id": "proceedings/big-data/2015/9926/0", "title": "2015 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2015/8493/0/8493b652", "title": "Influence Visualization of Scientific Paper through Flow-Based Citation Network Summarization", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2015/8493b652/12OmNxvwp2r", "parentPublication": { "id": "proceedings/icdmw/2015/8493/0", "title": "2015 IEEE International Conference on Data Mining Workshop (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paap/2015/9117/0/9117a177", "title": "Measuring User Influence Based on Multiple Metrics on YouTube", "doi": null, "abstractUrl": 
"/proceedings-article/paap/2015/9117a177/12OmNyq0zOO", "parentPublication": { "id": "proceedings/paap/2015/9117/0", "title": "2015 Seventh International Symposium on Parallel Architectures, Algorithms and Programming (PAAP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2015/12/07152908", "title": "VEGAS: Visual influEnce GrAph Summarization on Citation Networks", "doi": null, "abstractUrl": "/journal/tk/2015/12/07152908/13rRUxBa5co", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08676327", "articleId": "18NkfcOzr9u", "__typename": "AdjacentArticleType" }, "next": { "fno": "08684333", "articleId": "1keqXrXysr6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1mLI1dM7MfS", "name": "ttg202010-08673661s1-tvcg-shi-2906900-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202010-08673661s1-tvcg-shi-2906900-mm.zip", "extension": "zip", "size": "45 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1keqXrXysr6", "doi": "10.1109/TVCG.2019.2909875", "abstract": "Many materials combine a refractive boundary and a participating media on the interior. If the material has a low opacity, single scattering effects dominate in its appearance. Refraction at the boundary concentrates the incoming light, resulting in an important phenomenon called volume caustics. This phenomenon is hard to simulate. Previous methods used point-based light transport, but attributed point samples inefficiently, resulting in long computation time. In this paper, we use frequency analysis of light transport to allocate point samples efficiently. Our method works in two steps: in the first step, we compute volume samples along with their covariance matrices, encoding the illumination frequency content in a compact way. In the rendering step, we use the covariance matrices to compute the kernel size for each volume sample: small kernel for high-frequency single scattering, large kernel for lower frequencies. Our algorithm computes volume caustics with fewer volume samples, with no loss of quality. Our method is both faster and uses less memory than the original method. It is roughly twice as fast and uses one fifth of the memory. The extra cost of computing covariance matrices for frequency information is negligible.", "abstracts": [ { "abstractType": "Regular", "content": "Many materials combine a refractive boundary and a participating media on the interior. If the material has a low opacity, single scattering effects dominate in its appearance. Refraction at the boundary concentrates the incoming light, resulting in an important phenomenon called volume caustics. 
This phenomenon is hard to simulate. Previous methods used point-based light transport, but attributed point samples inefficiently, resulting in long computation time. In this paper, we use frequency analysis of light transport to allocate point samples efficiently. Our method works in two steps: in the first step, we compute volume samples along with their covariance matrices, encoding the illumination frequency content in a compact way. In the rendering step, we use the covariance matrices to compute the kernel size for each volume sample: small kernel for high-frequency single scattering, large kernel for lower frequencies. Our algorithm computes volume caustics with fewer volume samples, with no loss of quality. Our method is both faster and uses less memory than the original method. It is roughly twice as fast and uses one fifth of the memory. The extra cost of computing covariance matrices for frequency information is negligible.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Many materials combine a refractive boundary and a participating media on the interior. If the material has a low opacity, single scattering effects dominate in its appearance. Refraction at the boundary concentrates the incoming light, resulting in an important phenomenon called volume caustics. This phenomenon is hard to simulate. Previous methods used point-based light transport, but attributed point samples inefficiently, resulting in long computation time. In this paper, we use frequency analysis of light transport to allocate point samples efficiently. Our method works in two steps: in the first step, we compute volume samples along with their covariance matrices, encoding the illumination frequency content in a compact way. In the rendering step, we use the covariance matrices to compute the kernel size for each volume sample: small kernel for high-frequency single scattering, large kernel for lower frequencies. 
Our algorithm computes volume caustics with fewer volume samples, with no loss of quality. Our method is both faster and uses less memory than the original method. It is roughly twice as fast and uses one fifth of the memory. The extra cost of computing covariance matrices for frequency information is negligible.", "title": "Fast Computation of Single Scattering in Participating Media with Refractive Boundaries Using Frequency Analysis", "normalizedTitle": "Fast Computation of Single Scattering in Participating Media with Refractive Boundaries Using Frequency Analysis", "fno": "08684333", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Covariance Matrices", "Ray Tracing", "Rendering Computer Graphics", "Volume Caustics", "Frequency Analysis", "Covariance Matrices", "Illumination Frequency Content", "Rendering Step", "Kernel Size", "High Frequency Single Scattering", "Participating Media", "Refractive Boundary", "Low Opacity", "Single Scattering Effects", "Refraction", "Incoming Light", "Point Based Light Transport", "Covariance Matrices", "Scattering", "Media", "Lighting", "Kernel", "Cameras", "Rendering Computer Graphics", "Single Scattering", "Participating Media", "Frequency Analysis", "Covariance Tracing" ], "authors": [ { "givenName": "Yulin", "surname": "Liang", "fullName": "Yulin Liang", "affiliation": "School of Software, Shandong University, Jinan, China", "__typename": "ArticleAuthorType" }, { "givenName": "Beibei", "surname": "Wang", "fullName": "Beibei Wang", "affiliation": "School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lu", "surname": "Wang", "fullName": "Lu Wang", "affiliation": "School of Software, Shandong University, Jinan, China", "__typename": "ArticleAuthorType" }, { "givenName": "Nicolas", "surname": "Holzschuch", "fullName": "Nicolas Holzschuch", "affiliation": "Inria, CNRS, Grenoble INP, LJK, University Grenoble Alpes, Grenoble, 
France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "2961-2969", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cis/2012/4896/0/4896a396", "title": "Fast Multiple Scattering in Participating Media with Beamlet Decomposition", "doi": null, "abstractUrl": "/proceedings-article/cis/2012/4896a396/12OmNwekjJa", "parentPublication": { "id": "proceedings/cis/2012/4896/0", "title": "2012 Eighth International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477621", "title": "Image set classification by symmetric positive semi-definite matrices", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477621/12OmNx5Yv4o", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/06909929", "title": "Covariance Descriptors for 3D Shape Matching and Retrieval", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/06909929/12OmNxj23cY", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2008/2741/0/04634627", "title": "Interactive volumetric shadows in participating media with single-scattering", "doi": null, "abstractUrl": "/proceedings-article/rt/2008/04634627/12OmNyjtNIF", "parentPublication": { "id": "proceedings/rt/2008/2741/0", "title": "Symposium on 
Interactive Ray Tracing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07194844", "title": "Anisotropic Ambient Volume Shading", "doi": null, "abstractUrl": "/journal/tg/2016/01/07194844/13rRUB7a1fT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/04/ttg2013040576", "title": "Reflective and Refractive Objects for Mixed Reality", "doi": null, "abstractUrl": "/journal/tg/2013/04/ttg2013040576/13rRUxYINf9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/10/08093692", "title": "Point-Based Rendering for Homogeneous Participating Media with Refractive Boundaries", "doi": null, "abstractUrl": "/journal/tg/2018/10/08093692/13rRUy0qnGq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017622", "title": "Interactive Dynamic Volume Illumination with Refraction and Caustics", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017622/13rRUyfKIHU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000f477", "title": "Structured Uncertainty Prediction Networks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000f477/17D45WWzW3B", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2020/07/08600345", "title": "Precomputed Multiple Scattering for Rapid Light Simulation in Participating Media", "doi": null, "abstractUrl": "/journal/tg/2020/07/08600345/17D45Xh13tH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08673661", "articleId": "18LF7Q1L3na", "__typename": "AdjacentArticleType" }, "next": { "fno": "08865648", "articleId": "1e2DipgV9bq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1e2DipgV9bq", "doi": "10.1109/TVCG.2019.2945961", "abstract": "We present an efficient, trivially parallelizable algorithm to compute offset surfaces of shapes discretized using a dexel data structure. Our algorithm is based on a two-stage sweeping procedure that is simple to implement and efficient, entirely avoiding volumetric distance field computations typical of existing methods. Our construction is based on properties of half-space power diagrams, where each seed is only visible by a half-space, which were never used before for the computation of surface offsets. The primary application of our method is interactive modeling for digital fabrication. Our technique enables a user to interactively process high-resolution models. It is also useful in a plethora of other geometry processing tasks requiring fast, approximate offsets, such as topology optimization, collision detection, and skeleton extraction. We present experimental timings, comparisons with previous approaches, and provide a reference implementation in the supplemental material, which can be found on the Computer Society Digital Library at http://doi.ieeecomputersociety.org/10.1109/TVCG.2019.2945961.", "abstracts": [ { "abstractType": "Regular", "content": "We present an efficient, trivially parallelizable algorithm to compute offset surfaces of shapes discretized using a dexel data structure. Our algorithm is based on a two-stage sweeping procedure that is simple to implement and efficient, entirely avoiding volumetric distance field computations typical of existing methods. 
Our construction is based on properties of half-space power diagrams, where each seed is only visible by a half-space, which were never used before for the computation of surface offsets. The primary application of our method is interactive modeling for digital fabrication. Our technique enables a user to interactively process high-resolution models. It is also useful in a plethora of other geometry processing tasks requiring fast, approximate offsets, such as topology optimization, collision detection, and skeleton extraction. We present experimental timings, comparisons with previous approaches, and provide a reference implementation in the supplemental material, which can be found on the Computer Society Digital Library at http://doi.ieeecomputersociety.org/10.1109/TVCG.2019.2945961.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present an efficient, trivially parallelizable algorithm to compute offset surfaces of shapes discretized using a dexel data structure. Our algorithm is based on a two-stage sweeping procedure that is simple to implement and efficient, entirely avoiding volumetric distance field computations typical of existing methods. Our construction is based on properties of half-space power diagrams, where each seed is only visible by a half-space, which were never used before for the computation of surface offsets. The primary application of our method is interactive modeling for digital fabrication. Our technique enables a user to interactively process high-resolution models. It is also useful in a plethora of other geometry processing tasks requiring fast, approximate offsets, such as topology optimization, collision detection, and skeleton extraction. 
We present experimental timings, comparisons with previous approaches, and provide a reference implementation in the supplemental material, which can be found on the Computer Society Digital Library at http://doi.ieeecomputersociety.org/10.1109/TVCG.2019.2945961.", "title": "Half-Space Power Diagrams and Discrete Surface Offsets", "normalizedTitle": "Half-Space Power Diagrams and Discrete Surface Offsets", "fno": "08865648", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Geometry", "Data Structures", "Gaussian Processes", "Image Representation", "Image Resolution", "Image Texture", "Nonlinear Filters", "Rendering Computer Graphics", "Transfer Functions", "High Resolution Models", "Approximate Offsets", "Computer Society Digital Library", "Half Space Power Diagrams", "Discrete Surface Offsets", "Efficient Algorithm", "Trivially Parallelizable Algorithm", "Dexel Data Structure", "Two Stage Sweeping Procedure", "Volumetric Distance Field Computations", "Interactive Modeling", "Topology Optimization", "Surface Morphology", "Shape", "Three Dimensional Displays", "Two Dimensional Displays", "Data Structures", "Computational Modeling", "Fabrication", "Geometry Processing", "Offset", "Voronoi Diagram", "Power Diagram", "Dexels", "Layered Depth Images" ], "authors": [ { "givenName": "Zhen", "surname": "Chen", "fullName": "Zhen Chen", "affiliation": "Mathematics, University of Science and Technology of China, Hefei, Anhui, China", "__typename": "ArticleAuthorType" }, { "givenName": "Daniele", "surname": "Panozzo", "fullName": "Daniele Panozzo", "affiliation": "Computer Science, New York University Courant Institute of Mathematical Sciences, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Jérémie", "surname": "Dumas", "fullName": "Jérémie Dumas", "affiliation": "Computer Science, New York University Courant Institute of Mathematical Sciences, New York, NY, USA", "__typename": "ArticleAuthorType" } ], "replicability": { "isEnabled": 
true, "codeDownloadUrl": "https://github.com/geometryprocessing/voroffset.git", "codeRepositoryUrl": "https://github.com/geometryprocessing/voroffset", "__typename": "ArticleReplicabilityType" }, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "2970-2981", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2016/1437/0/1437a516", "title": "Graph-Constrained Surface Registration Based on Tutte Embedding", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2016/1437a516/12OmNBSjISd", "parentPublication": { "id": "proceedings/cvprw/2016/1437/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761503", "title": "3D face recognition with the average-half-face", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761503/12OmNvmG7Us", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032a764", "title": "Deformable Convolutional Networks", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032a764/12OmNy4r3V7", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a215", "title": "Half-Occluded Regions and Detection of Pseudoscopy", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a215/12OmNy5zsvU", "parentPublication": { "id": 
"proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446523", "title": "COP: A New Continuous Packing Layout for 360 VR Videos", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446523/13bd1fKQxs3", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/03/mcg2017030052", "title": "SurfCuit: Surface-Mounted Circuits on 3D Prints", "doi": null, "abstractUrl": "/magazine/cg/2017/03/mcg2017030052/13rRUwwslyL", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2018/12/08107582", "title": "Guaranteed Outlier Removal for Point Cloud Registration with Correspondences", "doi": null, "abstractUrl": "/journal/tp/2018/12/08107582/17D45WXIkBn", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/03/08292943", "title": "HARD-PnP: PnP Optimization Using a Hybrid Approximate Representation", "doi": null, "abstractUrl": "/journal/tp/2019/03/08292943/17D45XuDNFS", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800a449", "title": "Articulation-Aware Canonical Surface Mapping", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800a449/1m3ncXVXUlO", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on 
Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2020/8432/0/843200a001", "title": "A Baseline Approach for Goalkeeper Strategy using Sarsa with Tile Coding on the Half Field Offense Environment", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2020/843200a001/1pQIKAMR6Y8", "parentPublication": { "id": "proceedings/sbgames/2020/8432/0", "title": "2020 19th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08684333", "articleId": "1keqXrXysr6", "__typename": "AdjacentArticleType" }, "next": { "fno": "09123549", "articleId": "1kTxv3ChLeE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1kTxv3ChLeE", "doi": "10.1109/TVCG.2020.3004245", "abstract": "In this article, we present a novel method for the robust handling of static and dynamic rigid boundaries in Smoothed Particle Hydrodynamics (SPH) simulations. We build upon the ideas of the density maps approach which has been introduced recently by Koschier and Bender. They precompute the density contributions of solid boundaries and store them on a spatial grid which can be efficiently queried during runtime. This alleviates the problems of commonly used boundary particles, like bumpy surfaces and inaccurate pressure forces near boundaries. Our method is based on a similar concept but we precompute the volume contribution of the boundary geometry. This maintains all benefits of density maps but offers a variety of advantages which are demonstrated in several experiments. First, in contrast to the density maps method we can compute derivatives in the standard SPH manner by differentiating the kernel function. This results in smooth pressure forces, even for lower map resolutions, such that precomputation times and memory requirements are reduced by more than two orders of magnitude compared to density maps. Furthermore, this directly fits into the SPH concept so that volume maps can be seamlessly combined with existing SPH methods. Finally, the kernel function is not baked into the map such that the same volume map can be used with different kernels. 
This is especially useful when we want to incorporate common surface tension or viscosity methods that use different kernels than the fluid simulation.", "abstracts": [ { "abstractType": "Regular", "content": "In this article, we present a novel method for the robust handling of static and dynamic rigid boundaries in Smoothed Particle Hydrodynamics (SPH) simulations. We build upon the ideas of the density maps approach which has been introduced recently by Koschier and Bender. They precompute the density contributions of solid boundaries and store them on a spatial grid which can be efficiently queried during runtime. This alleviates the problems of commonly used boundary particles, like bumpy surfaces and inaccurate pressure forces near boundaries. Our method is based on a similar concept but we precompute the volume contribution of the boundary geometry. This maintains all benefits of density maps but offers a variety of advantages which are demonstrated in several experiments. First, in contrast to the density maps method we can compute derivatives in the standard SPH manner by differentiating the kernel function. This results in smooth pressure forces, even for lower map resolutions, such that precomputation times and memory requirements are reduced by more than two orders of magnitude compared to density maps. Furthermore, this directly fits into the SPH concept so that volume maps can be seamlessly combined with existing SPH methods. Finally, the kernel function is not baked into the map such that the same volume map can be used with different kernels. This is especially useful when we want to incorporate common surface tension or viscosity methods that use different kernels than the fluid simulation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this article, we present a novel method for the robust handling of static and dynamic rigid boundaries in Smoothed Particle Hydrodynamics (SPH) simulations. 
We build upon the ideas of the density maps approach which has been introduced recently by Koschier and Bender. They precompute the density contributions of solid boundaries and store them on a spatial grid which can be efficiently queried during runtime. This alleviates the problems of commonly used boundary particles, like bumpy surfaces and inaccurate pressure forces near boundaries. Our method is based on a similar concept but we precompute the volume contribution of the boundary geometry. This maintains all benefits of density maps but offers a variety of advantages which are demonstrated in several experiments. First, in contrast to the density maps method we can compute derivatives in the standard SPH manner by differentiating the kernel function. This results in smooth pressure forces, even for lower map resolutions, such that precomputation times and memory requirements are reduced by more than two orders of magnitude compared to density maps. Furthermore, this directly fits into the SPH concept so that volume maps can be seamlessly combined with existing SPH methods. Finally, the kernel function is not baked into the map such that the same volume map can be used with different kernels. 
This is especially useful when we want to incorporate common surface tension or viscosity methods that use different kernels than the fluid simulation.", "title": "Implicit Frictional Boundary Handling for SPH", "normalizedTitle": "Implicit Frictional Boundary Handling for SPH", "fno": "09123549", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Fluid Dynamics", "Friction", "Hydrodynamics", "Smoothed Particle Hydrodynamics", "Surface Tension", "Viscosity", "Viscosity Methods", "Surface Tension", "SPH Methods", "Volume Map", "SPH Concept", "Memory Requirements", "Precomputation Times", "Map Resolutions", "Smooth Pressure Forces", "Kernel Function", "Standard SPH Manner", "Density Maps Method", "Boundary Geometry", "Volume Contribution", "Bumpy Surfaces", "Boundary Particles", "Spatial Grid", "Solid Boundaries", "Density Contributions", "Density Maps Approach", "Smoothed Particle Hydrodynamics Simulations", "Dynamic Rigid Boundaries", "Static Boundaries", "Implicit Frictional Boundary Handling", "Solids", "Kernel", "Couplings", "Standards", "Friction", "Hydrodynamics", "Runtime", "Smoothed Particle Hydrodynamics", "Fluid Simulation", "Boundary Handling" ], "authors": [ { "givenName": "Jan", "surname": "Bender", "fullName": "Jan Bender", "affiliation": "RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Tassilo", "surname": "Kugelstadt", "fullName": "Tassilo Kugelstadt", "affiliation": "RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Marcel", "surname": "Weiler", "fullName": "Marcel Weiler", "affiliation": "RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Dan", "surname": "Koschier", "fullName": "Dan Koschier", "affiliation": "RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" } ], "replicability": { "isEnabled": true, "codeDownloadUrl": 
"https://github.com/InteractiveComputerGraphics/SPlisHSPlasH.git", "codeRepositoryUrl": "https://github.com/InteractiveComputerGraphics/SPlisHSPlasH", "__typename": "ArticleReplicabilityType" }, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "2982-2993", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/svr/2013/5001/0/06655759", "title": "A Comparative Analysis of Fluid Simulation Methods Based on SPH", "doi": null, "abstractUrl": "/proceedings-article/svr/2013/06655759/12OmNqBtiNf", "parentPublication": { "id": "proceedings/svr/2013/5001/0", "title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2010/4270/0/4270b212", "title": "Comparisions of Mirror and Static Boundary Conditions in Incompressible Smoothed Particle Hydrodynamics", "doi": null, "abstractUrl": "/proceedings-article/iccis/2010/4270b212/12OmNvqEvO1", "parentPublication": { "id": "proceedings/iccis/2010/4270/0", "title": "2010 International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icic/2010/7081/4/05514023", "title": "A New Boundary Treatment Method for SPH and Application in Fluid Simulation", "doi": null, "abstractUrl": "/proceedings-article/icic/2010/05514023/12OmNxtOO0u", "parentPublication": { "id": "proceedings/icic/2010/7081/4", "title": "2010 Third International Conference on Information and Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2012/4899/0/4899a374", "title": "SPH-Based Real-Time Wall-Fountain Simulation", "doi": null, "abstractUrl": 
"/proceedings-article/icdh/2012/4899a374/12OmNzuZUoG", "parentPublication": { "id": "proceedings/icdh/2012/4899/0", "title": "4th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/03/ttg2014030426", "title": "Implicit Incompressible SPH", "doi": null, "abstractUrl": "/journal/tg/2014/03/ttg2014030426/13rRUxYrbMg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/10/07932108", "title": "Pairwise Force SPH Model for Real-Time Multi-Interaction Applications", "doi": null, "abstractUrl": "/journal/tg/2017/10/07932108/13rRUyYjKan", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2022/2335/0/233500a041", "title": "Boundary-Aware Rectilinear Grid: Accurate Approximation of Unstructured Dataset into Rectilinear Grid with Solid Boundary Handling Capabilities", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2022/233500a041/1E2wgsxjfnW", "parentPublication": { "id": "proceedings/pacificvis/2022/2335/0", "title": "2022 IEEE 15th Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdiime/2022/9009/0/900900a048", "title": "Solid-Fluid Interaction Simulation System Based on SPH Unified Particle Framework", "doi": null, "abstractUrl": "/proceedings-article/icdiime/2022/900900a048/1Iz56eSpj3y", "parentPublication": { "id": "proceedings/icdiime/2022/9009/0", "title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "trans/tg/2022/10/09366379", "title": "Incompressibility Enforcement for Multiple-Fluid SPH Using Deformation Gradient", "doi": null, "abstractUrl": "/journal/tg/2022/10/09366379/1rCc9A58Mec", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08865648", "articleId": "1e2DipgV9bq", "__typename": "AdjacentArticleType" }, "next": { "fno": "09184389", "articleId": "1mLIesC5z0Y", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1mLI4Bv8vLi", "name": "ttg202010-09123549s1-supp1-3004245.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202010-09123549s1-supp1-3004245.mp4", "extension": "mp4", "size": "291 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1mLIesC5z0Y", "doi": "10.1109/TVCG.2020.3003768", "abstract": "State-of-the-art methods for diminished reality propagate pixel information from a keyframe to subsequent frames for real-time inpainting. However, these approaches produce artifacts, if the scene geometry is not sufficiently planar. In this article, we present InpaintFusion, a new real-time method that extends inpainting to non-planar scenes by considering both color and depth information in the inpainting process. We use an RGB-D sensor for simultaneous localization and mapping, in order to both track the camera and obtain a surfel map in addition to RGB images. We use the RGB-D information in a cost function for both the color and the geometric appearance to derive a global optimization for simultaneous inpainting of color and depth. The inpainted depth is merged in a global map by depth fusion. For the final rendering, we project the map model into image space, where we can use it for effects such as relighting and stereo rendering of otherwise hidden structures. We demonstrate the capabilities of our method by comparing it to inpainting results with methods using planar geometric proxies.", "abstracts": [ { "abstractType": "Regular", "content": "State-of-the-art methods for diminished reality propagate pixel information from a keyframe to subsequent frames for real-time inpainting. However, these approaches produce artifacts, if the scene geometry is not sufficiently planar. In this article, we present InpaintFusion, a new real-time method that extends inpainting to non-planar scenes by considering both color and depth information in the inpainting process. 
We use an RGB-D sensor for simultaneous localization and mapping, in order to both track the camera and obtain a surfel map in addition to RGB images. We use the RGB-D information in a cost function for both the color and the geometric appearance to derive a global optimization for simultaneous inpainting of color and depth. The inpainted depth is merged in a global map by depth fusion. For the final rendering, we project the map model into image space, where we can use it for effects such as relighting and stereo rendering of otherwise hidden structures. We demonstrate the capabilities of our method by comparing it to inpainting results with methods using planar geometric proxies.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "State-of-the-art methods for diminished reality propagate pixel information from a keyframe to subsequent frames for real-time inpainting. However, these approaches produce artifacts, if the scene geometry is not sufficiently planar. In this article, we present InpaintFusion, a new real-time method that extends inpainting to non-planar scenes by considering both color and depth information in the inpainting process. We use an RGB-D sensor for simultaneous localization and mapping, in order to both track the camera and obtain a surfel map in addition to RGB images. We use the RGB-D information in a cost function for both the color and the geometric appearance to derive a global optimization for simultaneous inpainting of color and depth. The inpainted depth is merged in a global map by depth fusion. For the final rendering, we project the map model into image space, where we can use it for effects such as relighting and stereo rendering of otherwise hidden structures. 
We demonstrate the capabilities of our method by comparing it to inpainting results with methods using planar geometric proxies.", "title": "InpaintFusion: Incremental RGB-D Inpainting for 3D Scenes", "normalizedTitle": "InpaintFusion: Incremental RGB-D Inpainting for 3D Scenes", "fno": "09184389", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cameras", "Image Colour Analysis", "Image Fusion", "Image Reconstruction", "Image Resolution", "Image Restoration", "Image Sensors", "Image Sequences", "Image Texture", "Object Detection", "Rendering Computer Graphics", "Stereo Image Processing", "Inpaint Fusion", "Incremental RGB D Inpainting", "Pixel Information", "Keyframe", "Subsequent Frames", "Real Time Inpainting", "Scene Geometry", "Nonplanar Scenes", "Depth Information", "Inpainting Process", "RGB D Sensor", "Surfel Map", "RGB Images", "RGB D Information", "Cost Function", "Geometric Appearance", "Global Optimization", "Simultaneous Inpainting", "Inpainted Depth", "Depth Fusion", "Map Model", "Image Space", "Planar Geometric Proxies", "Three Dimensional Displays", "Image Color Analysis", "Rendering Computer Graphics", "Cameras", "Simultaneous Localization And Mapping", "Real Time Systems", "Image Reconstruction", "Diminished Reality", "Inpainting", "Fusion", "SLAM" ], "authors": [ { "givenName": "Shohei", "surname": "Mori", "fullName": "Shohei Mori", "affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, Graz, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Okan", "surname": "Erat", "fullName": "Okan Erat", "affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, Graz, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Wolfgang", "surname": "Broll", "fullName": "Wolfgang Broll", "affiliation": "Virtual Worlds and Digital Games Group, Ilmenau University of Technology, Ilmenau, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Hideo", "surname": "Saito", 
"fullName": "Hideo Saito", "affiliation": "Department of Information and Computer Science, Keio University, Minato City, Tokyo, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Dieter", "surname": "Schmalstieg", "fullName": "Dieter Schmalstieg", "affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, Graz, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Denis", "surname": "Kalkofen", "fullName": "Denis Kalkofen", "affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, Graz, Austria", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "2994-3007", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2015/8332/0/8332a452", "title": "Planes Detection for Robust Localization and Mapping in RGB-D SLAM Systems", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a452/12OmNqH9hdY", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbr-lars-r/2016/3656/0/07783496", "title": "Object Subtraction Planar RGB-D SLAM", "doi": null, "abstractUrl": "/proceedings-article/sbr-lars-r/2016/07783496/12OmNxwENic", "parentPublication": { "id": "proceedings/sbr-lars-r/2016/3656/0", "title": "2016 XIII Latin-American Robotics Symposium and IV Brazilian Robotics Symposium (LARS/SBR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbrlarsrobocontrol/2014/6711/0/07024256", "title": "A Fast Visual Odometry and Mapping System for RGB-D Cameras", "doi": null, "abstractUrl": 
"/proceedings-article/sbrlarsrobocontrol/2014/07024256/12OmNylboJA", "parentPublication": { "id": "proceedings/sbrlarsrobocontrol/2014/6711/0", "title": "2014 Joint Conference on Robotics: SBR-LARS Robotics Symposium and Robocontrol (SBR LARS Robocontrol)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477636", "title": "CoRBS: Comprehensive RGB-D benchmark for SLAM using Kinect v2", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477636/12OmNzsJ7Hx", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a464", "title": "Multi-view Inpainting for RGB-D Sequence", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a464/17D45WgziNa", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699212", "title": "3D PixMix: Image Inpainting in 3D Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699212/19F1PUM1Yk0", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a308", "title": "Online Adaptive Integration of Observation and Inpainting for Diminished Reality with Online Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a308/1J7Wkijm8Yo", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International 
Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hdis/2022/9144/0/09991394", "title": "Pseudo Depth Maps for RGB-D SLAM", "doi": null, "abstractUrl": "/proceedings-article/hdis/2022/09991394/1JwQ1uhFF4s", "parentPublication": { "id": "proceedings/hdis/2022/9144/0", "title": "2022 International Conference on High Performance Big Data and Intelligent Systems (HDIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600b248", "title": "RGB-D Indoor Mapping Using Deep Features", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600b248/1iTvoaCYwrS", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09521742", "title": "Linear RGB-D SLAM for Structured Environments", "doi": null, "abstractUrl": "/journal/tp/2022/11/09521742/1wkrmZrcdcQ", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09123549", "articleId": "1kTxv3ChLeE", "__typename": "AdjacentArticleType" }, "next": { "fno": "08695851", "articleId": "19sOOqzUp7W", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1mNn3PlPhO8", "name": "ttg202010-09184389s1-supp2-3003768.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202010-09184389s1-supp2-3003768.mp4", "extension": "mp4", "size": "54 MB", "__typename": "WebExtraType" }, { "id": "1mNn6F8lYXe", "name": "ttg202010-09184389s1-supp1-3003768.mp4", "location": 
"https://www.computer.org/csdl/api/v1/extra/ttg202010-09184389s1-supp1-3003768.mp4", "extension": "mp4", "size": "69.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "19sOOqzUp7W", "doi": "10.1109/TVCG.2019.2912752", "abstract": "In a wide range of scientific fields, 3D datasets production capabilities have widely evolved in recent years, especially with the rapid increase in their sizes. As a result, many large-scale applications, including visualization or processing, have become challenging to address. A solution to this issue lies in providing out-of-core algorithms specifically designed to handle datasets significantly larger than memory. In this article, we present a new approach that extends the broad interactive addressing principles already established in the field of out-of-core volume rendering on GPUs to allow on-demand processing during the visualization stage. We propose a pipeline designed to manage data as regular 3D grids regardless of the underlying application. It relies on a caching approach with a virtual memory addressing system coupled to an efficient parallel management on GPU to provide efficient access to data in interactive time. It allows any visualization or processing application to leverage the flexibility of its structure by managing multi-modality datasets. Furthermore, we show that our system delivers good performance on a single standard PC with low memory budget on the GPU.", "abstracts": [ { "abstractType": "Regular", "content": "In a wide range of scientific fields, 3D datasets production capabilities have widely evolved in recent years, especially with the rapid increase in their sizes. As a result, many large-scale applications, including visualization or processing, have become challenging to address. 
A solution to this issue lies in providing out-of-core algorithms specifically designed to handle datasets significantly larger than memory. In this article, we present a new approach that extends the broad interactive addressing principles already established in the field of out-of-core volume rendering on GPUs to allow on-demand processing during the visualization stage. We propose a pipeline designed to manage data as regular 3D grids regardless of the underlying application. It relies on a caching approach with a virtual memory addressing system coupled to an efficient parallel management on GPU to provide efficient access to data in interactive time. It allows any visualization or processing application to leverage the flexibility of its structure by managing multi-modality datasets. Furthermore, we show that our system delivers good performance on a single standard PC with low memory budget on the GPU.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In a wide range of scientific fields, 3D datasets production capabilities have widely evolved in recent years, especially with the rapid increase in their sizes. As a result, many large-scale applications, including visualization or processing, have become challenging to address. A solution to this issue lies in providing out-of-core algorithms specifically designed to handle datasets significantly larger than memory. In this article, we present a new approach that extends the broad interactive addressing principles already established in the field of out-of-core volume rendering on GPUs to allow on-demand processing during the visualization stage. We propose a pipeline designed to manage data as regular 3D grids regardless of the underlying application. It relies on a caching approach with a virtual memory addressing system coupled to an efficient parallel management on GPU to provide efficient access to data in interactive time. 
It allows any visualization or processing application to leverage the flexibility of its structure by managing multi-modality datasets. Furthermore, we show that our system delivers good performance on a single standard PC with low memory budget on the GPU.", "title": "Interactive Visualization and On-Demand Processing of Large Volume Data: A Fully GPU-Based Out-of-Core Approach", "normalizedTitle": "Interactive Visualization and On-Demand Processing of Large Volume Data: A Fully GPU-Based Out-of-Core Approach", "fno": "08695851", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cache Storage", "Data Visualisation", "Graphics Processing Units", "Interactive Systems", "Rendering Computer Graphics", "Solid Modelling", "Out Of Core Volume Rendering", "Single Standard PC", "Multimodality Datasets Management", "Fully GPU Based Out Of Core Approach", "Low Memory Budget", "Efficient Parallel Management", "Virtual Memory Addressing System", "Caching Approach", "Regular 3 D Grids", "Broad Interactive Addressing Principles", "Large Scale Applications", "Large Volume Data", "On Demand Processing", "Interactive Visualization", "Graphics Processing Units", "Data Visualization", "Pipelines", "Memory Management", "Three Dimensional Displays", "Rendering Computer Graphics", "Casting", "GPU", "Caching System", "Out Of Core Data Management", "Large Data", "Interactive Visualization", "On Demand Processing" ], "authors": [ { "givenName": "Jonathan", "surname": "Sarton", "fullName": "Jonathan Sarton", "affiliation": "Université de Reims Champagne-Ardenne, Reims, France", "__typename": "ArticleAuthorType" }, { "givenName": "Nicolas", "surname": "Courilleau", "fullName": "Nicolas Courilleau", "affiliation": "Université de Reims Champagne-Ardenne, Reims, France", "__typename": "ArticleAuthorType" }, { "givenName": "Yannick", "surname": "Remion", "fullName": "Yannick Remion", "affiliation": "Université de Reims Champagne-Ardenne, Reims, France", "__typename": "ArticleAuthorType" }, { 
"givenName": "Laurent", "surname": "Lucas", "fullName": "Laurent Lucas", "affiliation": "Université de Reims Champagne-Ardenne, Reims, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "3008-3021", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2003/2030/0/20300038", "title": "Acceleration Techniques for GPU-based Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300038/12OmNC2xhD8", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/unesst/2015/9852/0/9852a022", "title": "Increasing GPU-Speedup of Volume Rendering for Images with High Complexity", "doi": null, "abstractUrl": "/proceedings-article/unesst/2015/9852a022/12OmNCmpcES", "parentPublication": { "id": "proceedings/unesst/2015/9852/0", "title": "2015 8th International Conference on u- and e- Service, Science and Technology (UNESST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2012/2049/0/06266300", "title": "A flexible Java GPU-enhanced visualization framework and its applications", "doi": null, "abstractUrl": "/proceedings-article/cbms/2012/06266300/12OmNwHQBby", "parentPublication": { "id": "proceedings/cbms/2012/2049/0", "title": "2012 25th IEEE International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/unesst/2015/9852/0/9852a018", "title": "Complexity Evaluation of CT-Images for GPU-Based Volume Rendering", "doi": null, "abstractUrl": 
"/proceedings-article/unesst/2015/9852a018/12OmNxeutee", "parentPublication": { "id": "proceedings/unesst/2015/9852/0", "title": "2015 8th International Conference on u- and e- Service, Science and Technology (UNESST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2014/5215/0/07013209", "title": "Out-of-core visualization of time-varying hybrid-grid volume data", "doi": null, "abstractUrl": "/proceedings-article/ldav/2014/07013209/12OmNyoiZc2", "parentPublication": { "id": "proceedings/ldav/2014/5215/0", "title": "2014 IEEE 4th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/10/ttg2013101732", "title": "Octree Rasterization: Accelerating High-Quality Out-of-Core GPU Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2013/10/ttg2013101732/13rRUwvBy8T", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2008/05/mcg2008050066", "title": "Dynamic Shader Generation for GPU-Based Multi-Volume Ray Casting", "doi": null, "abstractUrl": "/magazine/cg/2008/05/mcg2008050066/13rRUxN5evD", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/10/08100972", "title": "Multiresolution Volume Filtering in the Tensor Compressed Domain", "doi": null, "abstractUrl": "/journal/tg/2018/10/08100972/13rRUxjQypg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09079657", "title": "Distributed Interactive Visualization Using GPU-Optimized Spark", "doi": null, 
"abstractUrl": "/journal/tg/2021/09/09079657/1jmVbp8XqZa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2021/3283/0/328300a043", "title": "GPU-based Image Compression for Efficient Compositing in Distributed Rendering Applications", "doi": null, "abstractUrl": "/proceedings-article/ldav/2021/328300a043/1zdPDTXc4hy", "parentPublication": { "id": "proceedings/ldav/2021/3283/0", "title": "2021 IEEE 11th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09184389", "articleId": "1mLIesC5z0Y", "__typename": "AdjacentArticleType" }, "next": { "fno": "08700299", "articleId": "19xNwtl1N4s", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "19xNwtl1N4s", "doi": "10.1109/TVCG.2019.2913418", "abstract": "We propose a theoretical framework, based on the theory of Sobolev spaces, that allows for a comprehensive analysis of quadrature rules for integration over the sphere. We apply this framework to the case of shading integrals in order to predict and analyze the performances of quadrature methods. We show that the spectral distribution of the quadrature error depends not only on the samples set size, distribution and weights, but also on the BRDF and the integrand smoothness. The proposed spectral analysis of quadrature error allows for a better understanding of how the above different factors interact. We also extend our analysis to the case of Fourier truncation-based techniques applied to the shading integral, so as to find the smallest spherical/hemispherical harmonics degree L (truncation) that entails a targeted integration error. This application is very beneficial to global illumination methods such as Precomputed Radiance Transfer and Radiance Caching. Finally, our proposed framework is the first to allow a direct theoretical comparison between quadrature- and truncation-based methods applied to the shading integral. This enables, for example, to determine the spherical harmonics degree L which corresponds to a quadrature-based integration with N samples. Our theoretical findings are validated by a set of rendering experiments.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a theoretical framework, based on the theory of Sobolev spaces, that allows for a comprehensive analysis of quadrature rules for integration over the sphere. 
We apply this framework to the case of shading integrals in order to predict and analyze the performances of quadrature methods. We show that the spectral distribution of the quadrature error depends not only on the samples set size, distribution and weights, but also on the BRDF and the integrand smoothness. The proposed spectral analysis of quadrature error allows for a better understanding of how the above different factors interact. We also extend our analysis to the case of Fourier truncation-based techniques applied to the shading integral, so as to find the smallest spherical/hemispherical harmonics degree L (truncation) that entails a targeted integration error. This application is very beneficial to global illumination methods such as Precomputed Radiance Transfer and Radiance Caching. Finally, our proposed framework is the first to allow a direct theoretical comparison between quadrature- and truncation-based methods applied to the shading integral. This enables, for example, to determine the spherical harmonics degree L which corresponds to a quadrature-based integration with N samples. Our theoretical findings are validated by a set of rendering experiments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a theoretical framework, based on the theory of Sobolev spaces, that allows for a comprehensive analysis of quadrature rules for integration over the sphere. We apply this framework to the case of shading integrals in order to predict and analyze the performances of quadrature methods. We show that the spectral distribution of the quadrature error depends not only on the samples set size, distribution and weights, but also on the BRDF and the integrand smoothness. The proposed spectral analysis of quadrature error allows for a better understanding of how the above different factors interact. 
We also extend our analysis to the case of Fourier truncation-based techniques applied to the shading integral, so as to find the smallest spherical/hemispherical harmonics degree L (truncation) that entails a targeted integration error. This application is very beneficial to global illumination methods such as Precomputed Radiance Transfer and Radiance Caching. Finally, our proposed framework is the first to allow a direct theoretical comparison between quadrature- and truncation-based methods applied to the shading integral. This enables, for example, to determine the spherical harmonics degree L which corresponds to a quadrature-based integration with N samples. Our theoretical findings are validated by a set of rendering experiments.", "title": "Spectral Analysis of Quadrature Rules and Fourier Truncation-Based Methods Applied to Shading Integrals", "normalizedTitle": "Spectral Analysis of Quadrature Rules and Fourier Truncation-Based Methods Applied to Shading Integrals", "fno": "08700299", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Integration", "Lighting", "Rendering Computer Graphics", "Spectral Analysis", "Fourier Truncation Based Techniques", "Shading Integral", "Targeted Integration Error", "Global Illumination Methods", "Quadrature Based Integration", "Spectral Analysis", "Quadrature Rules", "Quadrature Methods", "Spectral Distribution", "Quadrature Error", "Rendering Experiments", "BRDF", "Integrand Smoothness", "Spectral Analysis", "Harmonic Analysis", "Monte Carlo Methods", "Rendering Computer Graphics", "Stochastic Processes", "Lighting", "Kernel", "Rendering Equation", "Spectral Analysis", "Monte Carlo Methods", "Spherical Harmonics Decomposition" ], "authors": [ { "givenName": "Ricardo", "surname": "Marques", "fullName": "Ricardo Marques", "affiliation": "Grup de Recerca de Tecnologies Interactives, Universitat Pompeu Fabra, Barcelona, Spain", "__typename": "ArticleAuthorType" }, { "givenName": "Christian", "surname": "Bouville", "fullName": 
"Christian Bouville", "affiliation": "IRISA, University of Rennes, 1 Rennes, France", "__typename": "ArticleAuthorType" }, { "givenName": "Kadi", "surname": "Bouatouch", "fullName": "Kadi Bouatouch", "affiliation": "IRISA, University of Rennes, 1 Rennes, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "3022-3036", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cw/2012/4814/0/4814a268", "title": "Precomputed Radiance Transfer as a Variance Reduction Technique -- A Small Case Study", "doi": null, "abstractUrl": "/proceedings-article/cw/2012/4814a268/12OmNButq4G", "parentPublication": { "id": "proceedings/cw/2012/4814/0", "title": "2012 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2009/3804/3/3804c277", "title": "Research on Optimized Combined Cosine Windows with Maximum Side Lobe Decay for Harmonic Analysis", "doi": null, "abstractUrl": "/proceedings-article/icicta/2009/3804c277/12OmNyQ7G7B", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460071", "title": "Shading derivation from an unspecified object for augmented reality", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460071/12OmNzAohXY", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1991/07/i0671", "title": "Reflections on Shading", "doi": null, "abstractUrl": 
"/journal/tp/1991/07/i0671/13rRUIIVllo", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/09/ttg2013091566", "title": "VisibilityCluster: Average Directional Visibility for Many-Light Rendering", "doi": null, "abstractUrl": "/journal/tg/2013/09/ttg2013091566/13rRUwInvsP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1997/09/i1020", "title": "Toward Accurate Recovery of Shape from Shading Under Diffuse Lighting", "doi": null, "abstractUrl": "/journal/tp/1997/09/i1020/13rRUwhHcRI", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h819", "title": "GLoSH: Global-Local Spherical Harmonics for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300h819/1hQqy771H9u", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800b147", "title": "Precomputed Radiance Transfer for Reflectance and Lighting Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800b147/1qyxlpSwLhC", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09573466", "title": "Towards a Unified Quadrature Framework for Large-Scale Kernel Machines", "doi": null, 
"abstractUrl": "/journal/tp/2022/11/09573466/1xH5DisLLsQ", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800b075", "title": "Recovering Real-World Reflectance Properties and Shading From HDR Imagery", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800b075/1zWEfggzOaA", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08695851", "articleId": "19sOOqzUp7W", "__typename": "AdjacentArticleType" }, "next": { "fno": "08703138", "articleId": "19Er78PKJKE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1mLIfqlerWE", "name": "ttg202010-08700299s1-additional_material.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202010-08700299s1-additional_material.pdf", "extension": "pdf", "size": "2.27 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "19Er78PKJKE", "doi": "10.1109/TVCG.2019.2914044", "abstract": "3D printed objects are rapidly becoming prevalent in science, technology and daily life. An important question is how to obtain strong and durable 3D models using standard printing techniques. This question is often translated to computing smartly designed interior structures that provide strong support and yield resistant 3D models. In this paper we suggest a combination between 3D printing and material injection to achieve strong 3D printed objects. We utilize triply periodic minimal surfaces (TPMS) to define novel interior support structures. TPMS are closed form and can be computed in a simple and straightforward manner. Since TPMS are smooth and connected, we utilize them to define channels that adequately distribute injected materials in the shape interior. To account for weak regions, TPMS channels are locally optimized according to the shape stress field. After the object is printed, we simply inject the TPMS channels with materials that solidify and yield a strong inner structure that supports the shape. Our method allows injecting a wide range of materials in an object interior in a fast and easy manner. Results demonstrate the efficiency of strong printing by combining 3D printing and injection together.", "abstracts": [ { "abstractType": "Regular", "content": "3D printed objects are rapidly becoming prevalent in science, technology and daily life. An important question is how to obtain strong and durable 3D models using standard printing techniques. This question is often translated to computing smartly designed interior structures that provide strong support and yield resistant 3D models. 
In this paper we suggest a combination between 3D printing and material injection to achieve strong 3D printed objects. We utilize triply periodic minimal surfaces (TPMS) to define novel interior support structures. TPMS are closed form and can be computed in a simple and straightforward manner. Since TPMS are smooth and connected, we utilize them to define channels that adequately distribute injected materials in the shape interior. To account for weak regions, TPMS channels are locally optimized according to the shape stress field. After the object is printed, we simply inject the TPMS channels with materials that solidify and yield a strong inner structure that supports the shape. Our method allows injecting a wide range of materials in an object interior in a fast and easy manner. Results demonstrate the efficiency of strong printing by combining 3D printing and injection together.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "3D printed objects are rapidly becoming prevalent in science, technology and daily life. An important question is how to obtain strong and durable 3D models using standard printing techniques. This question is often translated to computing smartly designed interior structures that provide strong support and yield resistant 3D models. In this paper we suggest a combination between 3D printing and material injection to achieve strong 3D printed objects. We utilize triply periodic minimal surfaces (TPMS) to define novel interior support structures. TPMS are closed form and can be computed in a simple and straightforward manner. Since TPMS are smooth and connected, we utilize them to define channels that adequately distribute injected materials in the shape interior. To account for weak regions, TPMS channels are locally optimized according to the shape stress field. After the object is printed, we simply inject the TPMS channels with materials that solidify and yield a strong inner structure that supports the shape. 
Our method allows injecting a wide range of materials in an object interior in a fast and easy manner. Results demonstrate the efficiency of strong printing by combining 3D printing and injection together.", "title": "Strong 3D Printing by TPMS Injection", "normalizedTitle": "Strong 3D Printing by TPMS Injection", "fno": "08703138", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Durability", "Optimisation", "Production Engineering Computing", "Rapid Prototyping Industrial", "Solid Modelling", "Three Dimensional Printing", "Strong D Models", "Durable 3 D Models", "Standard Printing Techniques", "Smartly Designed Interior Structures", "Material Injection", "Strong 3 D Printed Objects", "Triply Periodic Minimal Surfaces", "Interior Support Structures", "Straightforward Manner", "Injected Materials", "Shape Interior", "TPMS Channels", "Strong Inner Structure", "Object Interior", "Strong Printing", "TPMS Injection", "Shape Stress Field", "Three Dimensional Displays", "Shape", "Three Dimensional Printing", "Optimization", "Stress", "Topology", "Fabrication", "3 D Printing", "Minimal Surfaces", "Inner Structures", "Injection" ], "authors": [ { "givenName": "Xin", "surname": "Yan", "fullName": "Xin Yan", "affiliation": "School of Computer Science and Technology, Shandong University, Qingdao, Shandong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Cong", "surname": "Rao", "fullName": "Cong Rao", "affiliation": "School of Computer Science and Technology, Shandong University, Qingdao, Shandong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lin", "surname": "Lu", "fullName": "Lin Lu", "affiliation": "School of Computer Science and Technology, Shandong University, Qingdao, Shandong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Andrei", "surname": "Sharf", "fullName": "Andrei Sharf", "affiliation": "Department of Computer Science, Ben-Gurion University, Beersheba, Israel", "__typename": "ArticleAuthorType" }, { "givenName": 
"Haisen", "surname": "Zhao", "fullName": "Haisen Zhao", "affiliation": "School of Computer Science and Technology, Shandong University, Qingdao, Shandong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Baoquan", "surname": "Chen", "fullName": "Baoquan Chen", "affiliation": "Peking University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "3037-3050", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icvrv/2014/6854/0/6854a408", "title": "YaRep: A Personal 3D Printing Simulator", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2014/6854a408/12OmNBU1jL7", "parentPublication": { "id": "proceedings/icvrv/2014/6854/0", "title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/06/mcg2013060024", "title": "Computational Aspects of Fabrication: Modeling, Design, and 3D Printing", "doi": null, "abstractUrl": "/magazine/cg/2013/06/mcg2013060024/13rRUwbJCZh", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/06/mcg2013060048", "title": "3D-Printing Spatially Varying BRDFs", "doi": null, "abstractUrl": "/magazine/cg/2013/06/mcg2013060048/13rRUxly97Z", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/10/08086214", "title": "Toward Support-Free 3D Printing: A Skeletal Approach for Partitioning Models", "doi": null, "abstractUrl": 
"/journal/tg/2018/10/08086214/13rRUy0HYRy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2018/7315/0/731500a001", "title": "Modeling Single-Gyroid Structures in Surface Mesh Models for 3D Printing", "doi": null, "abstractUrl": "/proceedings-article/cw/2018/731500a001/17D45WrVgg7", "parentPublication": { "id": "proceedings/cw/2018/7315/0", "title": "2018 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eitt/2022/9248/0/924800a071", "title": "3D Printing Technology Supports the Learning of Geometry in Primary School Mathematics", "doi": null, "abstractUrl": "/proceedings-article/eitt/2022/924800a071/1MrSNmKwq9a", "parentPublication": { "id": "proceedings/eitt/2022/9248/0", "title": "2022 Eleventh International Conference of Educational Innovation through Technology (EITT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icedme/2020/8145/0/09122125", "title": "Design of New Triple Slider for 3D-Printing application", "doi": null, "abstractUrl": "/proceedings-article/icedme/2020/09122125/1kRSEEIlQti", "parentPublication": { "id": "proceedings/icedme/2020/8145/0", "title": "2020 3rd International Conference on Electron Device and Mechanical Engineering (ICEDME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/07/09258406", "title": "Efficient Representation and Optimization for TPMS-Based Porous Structures", "doi": null, "abstractUrl": "/journal/tg/2022/07/09258406/1oHi1OQbVOo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icedme/2021/3596/0/359600a071", "title": "Application and Research of 3D Printing Technology in the Field of Architecture", "doi": null, "abstractUrl": "/proceedings-article/icedme/2021/359600a071/1tMPRFuihQQ", "parentPublication": { "id": "proceedings/icedme/2021/3596/0", "title": "2021 4th International Conference on Electron Device and Mechanical Engineering (ICEDME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2020/9234/0/923400a304", "title": "Design and optimization of bionic bone with micropore structure suitable for 3D printing", "doi": null, "abstractUrl": "/proceedings-article/icdh/2020/923400a304/1uGXVgnGyHK", "parentPublication": { "id": "proceedings/icdh/2020/9234/0", "title": "2020 8th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08700299", "articleId": "19xNwtl1N4s", "__typename": "AdjacentArticleType" }, "next": { "fno": "08718533", "articleId": "1abD1LIMBlS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1abD1LIMBlS", "doi": "10.1109/TVCG.2019.2917689", "abstract": "A viewer can extract many potential patterns from any set of visualized data values. But that means that two people can see different patterns in the same visualization, potentially leading to miscommunication. Here, we show that when people are primed to see one pattern in the data as visually salient, they believe that naïve viewers will experience the same visual salience. Participants were told one of multiple backstories about political events that affected public polling data, before viewing a graph that depicted those data. One pattern in the data was particularly visually salient to them given the backstory that they heard. They then predicted what naïve viewers would most visually salient on the visualization. They were strongly influenced by their own knowledge, despite explicit instructions to ignore it, predicting that others would find the same patterns to be most visually salient. This result reflects a psychological phenomenon known as the curse of knowledge, where an expert struggles to re-create the state of mind of a novice. The present findings show that the curse of knowledge also plagues the visual perception of data, explaining why people can fail to connect with audiences when they communicate patterns in data.", "abstracts": [ { "abstractType": "Regular", "content": "A viewer can extract many potential patterns from any set of visualized data values. But that means that two people can see different patterns in the same visualization, potentially leading to miscommunication. 
Here, we show that when people are primed to see one pattern in the data as visually salient, they believe that naïve viewers will experience the same visual salience. Participants were told one of multiple backstories about political events that affected public polling data, before viewing a graph that depicted those data. One pattern in the data was particularly visually salient to them given the backstory that they heard. They then predicted what naïve viewers would most visually salient on the visualization. They were strongly influenced by their own knowledge, despite explicit instructions to ignore it, predicting that others would find the same patterns to be most visually salient. This result reflects a psychological phenomenon known as the curse of knowledge, where an expert struggles to re-create the state of mind of a novice. The present findings show that the curse of knowledge also plagues the visual perception of data, explaining why people can fail to connect with audiences when they communicate patterns in data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A viewer can extract many potential patterns from any set of visualized data values. But that means that two people can see different patterns in the same visualization, potentially leading to miscommunication. Here, we show that when people are primed to see one pattern in the data as visually salient, they believe that naïve viewers will experience the same visual salience. Participants were told one of multiple backstories about political events that affected public polling data, before viewing a graph that depicted those data. One pattern in the data was particularly visually salient to them given the backstory that they heard. They then predicted what naïve viewers would most visually salient on the visualization. 
They were strongly influenced by their own knowledge, despite explicit instructions to ignore it, predicting that others would find the same patterns to be most visually salient. This result reflects a psychological phenomenon known as the curse of knowledge, where an expert struggles to re-create the state of mind of a novice. The present findings show that the curse of knowledge also plagues the visual perception of data, explaining why people can fail to connect with audiences when they communicate patterns in data.", "title": "The Curse of Knowledge in Visual Data Communication", "normalizedTitle": "The Curse of Knowledge in Visual Data Communication", "fno": "08718533", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Communication", "Data Visualisation", "Feature Extraction", "Psychology", "Visual Perception", "Visual Data Communication", "Potential Patterns", "Visualized Data Values", "Nai X 0308 Ve Viewers", "Visual Salience", "Multiple Backstories", "Political Events", "Public Polling Data", "Backstory", "Psychological Phenomenon", "Knowledge Curse", "Data Visualization", "Visualization", "Psychology", "Decision Making", "Data Communication", "Data Mining", "Cognition", "Cognitive Biases", "Data Communication", "Expertise", "Information Visualization", "Perception And Cognition" ], "authors": [ { "givenName": "Cindy", "surname": "Xiong", "fullName": "Cindy Xiong", "affiliation": "Northwestern University, Evanston, IL, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Lisanne", "surname": "Van Weelden", "fullName": "Lisanne Van Weelden", "affiliation": "Utrecht University, Utrecht, Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Steven", "surname": "Franconeri", "fullName": "Steven Franconeri", "affiliation": "Northwestern University, Evanston, IL, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": 
"2020-10-01 00:00:00", "pubType": "trans", "pages": "3051-3062", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/re/2013/5765/0/06636720", "title": "Keeping requirements on track via visual analytics", "doi": null, "abstractUrl": "/proceedings-article/re/2013/06636720/12OmNqH9hoK", "parentPublication": { "id": "proceedings/re/2013/5765/0", "title": "2013 IEEE 21st International Requirements Engineering Conference (RE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccem/2016/4573/0/4573a090", "title": "Visual Analytics of Terrorism Data", "doi": null, "abstractUrl": "/proceedings-article/ccem/2016/4573a090/12OmNwdtwaG", "parentPublication": { "id": "proceedings/ccem/2016/4573/0", "title": "2016 IEEE International Conference on Cloud Computing in Emerging Markets (CCEM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2008/2570/0/04607627", "title": "Dynamic visual saliency modeling based on spatiotemporal analysis", "doi": null, "abstractUrl": "/proceedings-article/icme/2008/04607627/12OmNwp74M8", "parentPublication": { "id": "proceedings/icme/2008/2570/0", "title": "2008 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi/2016/4470/0/4470a709", "title": "An Interactive Circular Visual Analytic Tool for Visualization of Web Data", "doi": null, "abstractUrl": "/proceedings-article/wi/2016/4470a709/12OmNxwENqO", "parentPublication": { "id": "proceedings/wi/2016/4470/0", "title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence (WI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2013/2576/0/06814998", "title": "Visual Saliency Guided Global and Local 
Resizing for 3D Models", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2013/06814998/12OmNyo1o1L", "parentPublication": { "id": "proceedings/cad-graphics/2013/2576/0", "title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/06/mcg2017060010", "title": "Visual Communication and Cognition in Everyday Decision-Making", "doi": null, "abstractUrl": "/magazine/cg/2017/06/mcg2017060010/13rRUx0xPyU", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2016/04/mcg2016040046", "title": "Measuring the Visual Salience of 3D Printed Objects", "doi": null, "abstractUrl": "/magazine/cg/2016/04/mcg2016040046/13rRUxN5evM", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vds/2017/3185/0/08573439", "title": "Clear Visual Separation of Temporal Event Sequences", "doi": null, "abstractUrl": "/proceedings-article/vds/2017/08573439/17D45W9KVGv", "parentPublication": { "id": "proceedings/vds/2017/3185/0", "title": "2017 IEEE Visualization in Data Science (VDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440809", "title": "Mitigating the Attraction Effect with Visualizations", "doi": null, "abstractUrl": "/journal/tg/2019/01/08440809/17D45XH89qj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2019/4896/0/489600a620", "title": "Mining Logic Patterns from Visual Data", "doi": null, "abstractUrl": 
"/proceedings-article/icdmw/2019/489600a620/1gAwYsaNpcs", "parentPublication": { "id": "proceedings/icdmw/2019/4896/0", "title": "2019 International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08703138", "articleId": "19Er78PKJKE", "__typename": "AdjacentArticleType" }, "next": { "fno": "08676380", "articleId": "18NkfV97jEc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "18NkfV97jEc", "doi": "10.1109/TVCG.2019.2908166", "abstract": "The analysis of subtle deviations between different versions of historical prints has been a long-standing challenge in art history research. So far, this challenge has required extensive domain knowledge, fine-tuned expert perception, and time-consuming manual labor. In this paper we introduce an explorative visual approach to facilitate fast and accurate support for the task of comparing differences between prints such as engravings and woodcuts. To this end, we have developed a customized algorithm that detects similar stroke-patterns in prints and matches them in order to allow visual alignment and automated deviation highlighting. Our visual analytics system enables art history researchers to quickly detect, document, and categorize qualitative and quantitative discrepancies, and to analyze these discrepancies using comprehensive interactions. To evaluate our approach, we conducted a user study involving both experts on historical prints and laypeople. Using our new interactive technique, our subjects found about 20 percent more differences compared to regular image viewing software as well as “paper-based” comparison. Moreover, the laypeople found the same differences as the experts when they used our system, which was not the case for conventional methods. Informal feedback showed that both laypeople and experts strongly preferred employing our system to working with conventional methods.", "abstracts": [ { "abstractType": "Regular", "content": "The analysis of subtle deviations between different versions of historical prints has been a long-standing challenge in art history research. 
So far, this challenge has required extensive domain knowledge, fine-tuned expert perception, and time-consuming manual labor. In this paper we introduce an explorative visual approach to facilitate fast and accurate support for the task of comparing differences between prints such as engravings and woodcuts. To this end, we have developed a customized algorithm that detects similar stroke-patterns in prints and matches them in order to allow visual alignment and automated deviation highlighting. Our visual analytics system enables art history researchers to quickly detect, document, and categorize qualitative and quantitative discrepancies, and to analyze these discrepancies using comprehensive interactions. To evaluate our approach, we conducted a user study involving both experts on historical prints and laypeople. Using our new interactive technique, our subjects found about 20 percent more differences compared to regular image viewing software as well as “paper-based” comparison. Moreover, the laypeople found the same differences as the experts when they used our system, which was not the case for conventional methods. Informal feedback showed that both laypeople and experts strongly preferred employing our system to working with conventional methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The analysis of subtle deviations between different versions of historical prints has been a long-standing challenge in art history research. So far, this challenge has required extensive domain knowledge, fine-tuned expert perception, and time-consuming manual labor. In this paper we introduce an explorative visual approach to facilitate fast and accurate support for the task of comparing differences between prints such as engravings and woodcuts. To this end, we have developed a customized algorithm that detects similar stroke-patterns in prints and matches them in order to allow visual alignment and automated deviation highlighting. 
Our visual analytics system enables art history researchers to quickly detect, document, and categorize qualitative and quantitative discrepancies, and to analyze these discrepancies using comprehensive interactions. To evaluate our approach, we conducted a user study involving both experts on historical prints and laypeople. Using our new interactive technique, our subjects found about 20 percent more differences compared to regular image viewing software as well as “paper-based” comparison. Moreover, the laypeople found the same differences as the experts when they used our system, which was not the case for conventional methods. Informal feedback showed that both laypeople and experts strongly preferred employing our system to working with conventional methods.", "title": "VeCHArt: Visually Enhanced Comparison of Historic Art Using an Automated Line-Based Synchronization Technique", "normalizedTitle": "VeCHArt: Visually Enhanced Comparison of Historic Art Using an Automated Line-Based Synchronization Technique", "fno": "08676380", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Art", "Data Analysis", "Data Visualisation", "History", "Synchronisation", "User Interfaces", "Ve CH Art", "Visually Enhanced Comparison", "Historic Art", "Automated Line Based Synchronization Technique", "Subtle Deviations", "Historical Prints", "Art History Research", "Extensive Domain Knowledge", "Fine Tuned Expert Perception", "Time Consuming Manual Labor", "Explorative Visual Approach", "Accurate Support", "Engravings", "Woodcuts", "Customized Algorithm", "Visual Alignment", "Automated Deviation Highlighting", "Visual Analytics System", "Art History Researchers", "Qualitative Discrepancies", "Quantitative Discrepancies", "Comprehensive Interactions", "Interactive Technique", "Paper Based Comparison", "Stroke Pattern Detection", "Efficiency 20 0 Percent", "Visualization", "Art", "History", "Synchronization", "Printing", "Task Analysis", "Tools", "Visual Analytics", "User 
Interaction", "Art History", "Qualitative Evaluation", "Visual Comparison" ], "authors": [ { "givenName": "Hermann", "surname": "Pflüger", "fullName": "Hermann Pflüger", "affiliation": "Institute of Visualization and Interactive Systems, University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Dennis", "surname": "Thom", "fullName": "Dennis Thom", "affiliation": "Institute of Visualization and Interactive Systems, University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Anna", "surname": "Schütz", "fullName": "Anna Schütz", "affiliation": "Institute of Art History, University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Daniela", "surname": "Bohde", "fullName": "Daniela Bohde", "affiliation": "Institute of Art History, University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Thomas", "surname": "Ertl", "fullName": "Thomas Ertl", "affiliation": "Institute of Visualization and Interactive Systems, University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "3063-3076", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2017/0831/0/0831a336", "title": "InfluViz — A Visualization Tool for Exploring and Analyzing Creative Influence Between Artists and their Works", "doi": null, "abstractUrl": "/proceedings-article/iv/2017/0831a336/12OmNzcPAHB", "parentPublication": { "id": "proceedings/iv/2017/0831/0", "title": "2017 21st International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/03/08283817", 
"title": "Using Dashboard Networks to Visualize Multiple Patient Histories: A Design Study on Post-Operative Prostate Cancer", "doi": null, "abstractUrl": "/journal/tg/2019/03/08283817/17D45XacGi3", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mmrp/2019/1649/0/08665381", "title": "State of the Art and Perspectives in Multi-Layer Formats for Music Representation", "doi": null, "abstractUrl": "/proceedings-article/mmrp/2019/08665381/18qca4TKpHO", "parentPublication": { "id": "proceedings/mmrp/2019/1649/0", "title": "2019 International Workshop on Multilayer Music Representation and Processing (MMRP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2022/5908/0/09953880", "title": "Cross-Linguistic Study on Affective Impression and Language for Visual Art Using Neural Speaker", "doi": null, "abstractUrl": "/proceedings-article/acii/2022/09953880/1IAJZJu6Nmo", "parentPublication": { "id": "proceedings/acii/2022/5908/0", "title": "2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2019/4896/0/489600a005", "title": "How Will Sense of Values Change during Art Appreciation?", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2019/489600a005/1gAx0SPxuvu", "parentPublication": { "id": "proceedings/icdmw/2019/4896/0", "title": "2019 International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2021/4065/0/406500a145", "title": "Creative Technologies - From VR and Art Aesthetics to Form and Function", "doi": null, "abstractUrl": "/proceedings-article/cw/2021/406500a145/1yBF3vMnz7q", "parentPublication": { 
"id": "proceedings/cw/2021/4065/0", "title": "2021 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08718533", "articleId": "1abD1LIMBlS", "__typename": "AdjacentArticleType" }, "next": { "fno": "08705364", "articleId": "19Jq5yQE0ZW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1mLIhS7wg7u", "name": "ttg202010-08676380s1-tvcg-pfluger-2908166-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202010-08676380s1-tvcg-pfluger-2908166-mm.zip", "extension": "zip", "size": "27.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "19Jq5yQE0ZW", "doi": "10.1109/TVCG.2019.2914676", "abstract": "We propose a method for adding small-scale details to surfaces of 3D geometries in the context of interactive deformation computation of elastic objects. This is relevant in real-time applications, for instance, in surgical simulation or interactive animation. The key idea is the procedural generation of surface details via a weighted sum of periodic functions, applied as an on-surface displacement field. We first calculate local deformation strains of a low-resolution 3D input mesh, which are then employed to estimate amplitudes, orientations, and positions of high-resolution details. The shapes and spatial frequencies of the periodic details are obtained from mechanical parameters, assuming the physical model of a film-substrate aggregate. Finally, our approach creates the highly-detailed output mesh fully on the GPU. The performance is independent of the spatial frequency of the inserted details as well as, within certain limits, of the resolution of the output mesh. We can reproduce numerous commonly observed, characteristic surface deformation patterns, such as wrinkles or buckles, allowing for the representation of a wide variety of simulated materials and interaction processes. We highlight the performance of our method with several examples.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a method for adding small-scale details to surfaces of 3D geometries in the context of interactive deformation computation of elastic objects. This is relevant in real-time applications, for instance, in surgical simulation or interactive animation. 
The key idea is the procedural generation of surface details via a weighted sum of periodic functions, applied as an on-surface displacement field. We first calculate local deformation strains of a low-resolution 3D input mesh, which are then employed to estimate amplitudes, orientations, and positions of high-resolution details. The shapes and spatial frequencies of the periodic details are obtained from mechanical parameters, assuming the physical model of a film-substrate aggregate. Finally, our approach creates the highly-detailed output mesh fully on the GPU. The performance is independent of the spatial frequency of the inserted details as well as, within certain limits, of the resolution of the output mesh. We can reproduce numerous commonly observed, characteristic surface deformation patterns, such as wrinkles or buckles, allowing for the representation of a wide variety of simulated materials and interaction processes. We highlight the performance of our method with several examples.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a method for adding small-scale details to surfaces of 3D geometries in the context of interactive deformation computation of elastic objects. This is relevant in real-time applications, for instance, in surgical simulation or interactive animation. The key idea is the procedural generation of surface details via a weighted sum of periodic functions, applied as an on-surface displacement field. We first calculate local deformation strains of a low-resolution 3D input mesh, which are then employed to estimate amplitudes, orientations, and positions of high-resolution details. The shapes and spatial frequencies of the periodic details are obtained from mechanical parameters, assuming the physical model of a film-substrate aggregate. Finally, our approach creates the highly-detailed output mesh fully on the GPU. 
The performance is independent of the spatial frequency of the inserted details as well as, within certain limits, of the resolution of the output mesh. We can reproduce numerous commonly observed, characteristic surface deformation patterns, such as wrinkles or buckles, allowing for the representation of a wide variety of simulated materials and interaction processes. We highlight the performance of our method with several examples.", "title": "Wrinkles, Folds, Creases, Buckles: Small-Scale Surface Deformations as Periodic Functions on 3D Meshes", "normalizedTitle": "Wrinkles, Folds, Creases, Buckles: Small-Scale Surface Deformations as Periodic Functions on 3D Meshes", "fno": "08705364", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Buckling", "Computational Geometry", "Computer Animation", "Deformation", "Elasticity", "Mesh Generation", "Solid Modelling", "Folds", "Creases", "Buckles", "Periodic Functions", "3 D Meshes", "Interactive Deformation Computation", "Elastic Objects", "Real Time Applications", "Surgical Simulation", "Interactive Animation", "Procedural Generation", "On Surface Displacement Field", "Local Deformation Strains", "Low Resolution 3 D Input Mesh", "High Resolution Details", "Spatial Frequencies", "Mechanical Parameters", "Physical Model", "Film Substrate Aggregate", "GPU", "Spatial Frequency", "Characteristic Surface Deformation Patterns", "Simulated Materials", "Interaction Processes", "3 D Geometries", "Surface Details Procedural Generation", "Strain", "Surface Morphology", "Three Dimensional Displays", "Geometry", "Graphics Processing Units", "Deformable Models", "Surface Waves", "Physically Based Interactive Simulation", "Small Scale Surface Deformations", "Procedural Mesh Synthesis" ], "authors": [ { "givenName": "Evgeny", "surname": "Zuenko", "fullName": "Evgeny Zuenko", "affiliation": "Department of Computer Science, Interactive Graphics and Simulation Group, University of Innsbruck, Innsbruck, Tyrol, Austria", "__typename": 
"ArticleAuthorType" }, { "givenName": "Matthias", "surname": "Harders", "fullName": "Matthias Harders", "affiliation": "Department of Computer Science, Interactive Graphics and Simulation Group, University of Innsbruck, Innsbruck, Tyrol, Austria", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "3077-3088", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/1999/5897/0/58970028", "title": "Animating Wrinkles on Clothes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970028/12OmNAkWvLr", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2016/1437/0/1437a442", "title": "Discrete Optimisation for Group-Wise Cortical Surface Atlasing", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2016/1437a442/12OmNC4eSqp", "parentPublication": { "id": "proceedings/cvprw/2016/1437/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit/2008/2357/0/04594641", "title": "Linear mesh deformation with snake constraint", "doi": null, "abstractUrl": "/proceedings-article/cit/2008/04594641/12OmNx5piS9", "parentPublication": { "id": "proceedings/cit/2008/2357/0", "title": "2008 8th IEEE International Conference on Computer and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2007/2928/0/29280200", "title": "Multi-resolution Meshes Deformation Based on Pyramid Coordinates", "doi": null, 
"abstractUrl": "/proceedings-article/cgiv/2007/29280200/12OmNzy7uQF", "parentPublication": { "id": "proceedings/cgiv/2007/2928/0", "title": "Computer Graphics, Imaging and Visualisation (CGIV 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a719", "title": "Residual MeshNet: Learning to Deform Meshes for Single-View 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a719/17D45WwsQ7t", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09870173", "title": "NerfCap: Human Performance Capture With Dynamic Neural Radiance Fields", "doi": null, "abstractUrl": "/journal/tg/5555/01/09870173/1GgcSqKQSM8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a073", "title": "Surface Flattening Based on Energy Fabric Deformation Model in Garment Design", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a073/1ap5xx2ft5e", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800a072", "title": "Neural Cages for Detail-Preserving 3D Deformations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800a072/1m3o62Q32U0", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iciev-&-icivpr/2020/9331/0/09306533", "title": "Towards Detailed 3D Modeling: Mesh Super-Resolution via Deformation", "doi": null, "abstractUrl": "/proceedings-article/iciev-&-icivpr/2020/09306533/1qcicuvmpcQ", "parentPublication": { "id": "proceedings/iciev-&-icivpr/2020/9331/0", "title": "2020 Joint 9th International Conference on Informatics, Electronics & Vision (ICIEV) and 2020 4th International Conference on Imaging, Vision & Pattern Recognition (icIVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800b098", "title": "Saliency Guided Subdivision for Single-View Mesh Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800b098/1qyxnB7fQvm", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08676380", "articleId": "18NkfV97jEc", "__typename": "AdjacentArticleType" }, "next": { "fno": "08695841", "articleId": "19sOOGyyt56", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1mLIg81thYc", "name": "ttg202010-08705364s1-supplementaryvideo.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202010-08705364s1-supplementaryvideo.mp4", "extension": "mp4", "size": "42.6 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "19sOOGyyt56", "doi": "10.1109/TVCG.2019.2912607", "abstract": "Due to recent advances in virtual reality (VR) technology, the development of immersive VR applications that track body motions and visualize a full-body avatar is attracting increasing research interest. This paper reviews related research to gather and to critically analyze recent improvements regarding the potential of full-body motion reconstruction in VR applications. We conducted a systematic literature search, matching VR and full-body tracking related keywords on IEEE Xplore, PubMed, ACM, and Scopus. Fifty-three publications were included and assigned in three groups: studies using markerless and marker-based motion tracking systems as well as systems using inertial measurement units. All analyzed research publications track the motions of the user wearing a head-mounted display and visualize a full-body avatar. The analysis confirmed that a full-body avatar can enhance the sense of embodiment and can improve the immersion within the VR. The results indicated that the Kinect device is still the most frequently used sensor (27 out of 53). Furthermore, there is a trend to track the movements of multiple users simultaneously. Many studies that enable multiplayer mode in VR use marker-based systems (7 out of 17) because they are much more robust and can accurately track full-body movements of multiple users in real-time.", "abstracts": [ { "abstractType": "Regular", "content": "Due to recent advances in virtual reality (VR) technology, the development of immersive VR applications that track body motions and visualize a full-body avatar is attracting increasing research interest. 
This paper reviews related research to gather and to critically analyze recent improvements regarding the potential of full-body motion reconstruction in VR applications. We conducted a systematic literature search, matching VR and full-body tracking related keywords on IEEE Xplore, PubMed, ACM, and Scopus. Fifty-three publications were included and assigned in three groups: studies using markerless and marker-based motion tracking systems as well as systems using inertial measurement units. All analyzed research publications track the motions of the user wearing a head-mounted display and visualize a full-body avatar. The analysis confirmed that a full-body avatar can enhance the sense of embodiment and can improve the immersion within the VR. The results indicated that the Kinect device is still the most frequently used sensor (27 out of 53). Furthermore, there is a trend to track the movements of multiple users simultaneously. Many studies that enable multiplayer mode in VR use marker-based systems (7 out of 17) because they are much more robust and can accurately track full-body movements of multiple users in real-time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Due to recent advances in virtual reality (VR) technology, the development of immersive VR applications that track body motions and visualize a full-body avatar is attracting increasing research interest. This paper reviews related research to gather and to critically analyze recent improvements regarding the potential of full-body motion reconstruction in VR applications. We conducted a systematic literature search, matching VR and full-body tracking related keywords on IEEE Xplore, PubMed, ACM, and Scopus. Fifty-three publications were included and assigned in three groups: studies using markerless and marker-based motion tracking systems as well as systems using inertial measurement units. 
All analyzed research publications track the motions of the user wearing a head-mounted display and visualize a full-body avatar. The analysis confirmed that a full-body avatar can enhance the sense of embodiment and can improve the immersion within the VR. The results indicated that the Kinect device is still the most frequently used sensor (27 out of 53). Furthermore, there is a trend to track the movements of multiple users simultaneously. Many studies that enable multiplayer mode in VR use marker-based systems (7 out of 17) because they are much more robust and can accurately track full-body movements of multiple users in real-time.", "title": "A Survey of Full-Body Motion Reconstruction in Immersive Virtual Reality Applications", "normalizedTitle": "A Survey of Full-Body Motion Reconstruction in Immersive Virtual Reality Applications", "fno": "08695841", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Avatars", "Helmet Mounted Displays", "Image Motion Analysis", "Image Reconstruction", "Full Body Movements", "Full Body Motion Reconstruction", "Immersive Virtual Reality Applications", "Immersive VR Applications", "Body Motion Tracking", "Full Body Tracking Related Keywords", "Markerless Marker Based Motion Tracking Systems", "Kinect Device", "Multiplayer Mode", "Virtual Reality Technology", "Head Mounted Display", "Full Body Avatar Visualization", "Tracking", "Avatars", "Real Time Systems", "Resists", "Games", "Cameras", "Robot Sensing Systems", "Virtual Reality", "Full Body Tracking", "Motion Reconstruction", "Markerless Motion Capture", "Marker Based Motion Capture", "Inertial Measurement Units", "Full Body Avatar" ], "authors": [ { "givenName": "Polona", "surname": "Caserman", "fullName": "Polona Caserman", "affiliation": "Multimedia Communications Lab, Technische Universität Darmstadt, Darmstadt, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Augusto", "surname": "Garcia-Agundez", "fullName": "Augusto Garcia-Agundez", "affiliation": 
"Multimedia Communications Lab, Technische Universität Darmstadt, Darmstadt, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Stefan", "surname": "Göbel", "fullName": "Stefan Göbel", "affiliation": "Multimedia Communications Lab, Technische Universität Darmstadt, Darmstadt, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "3089-3108", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2014/2871/0/06802099", "title": "Full body interaction in virtual reality with affordable hardware", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802099/12OmNy2agQj", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446229", "title": "Any “Body” There? 
Avatar Visibility Effects in a Virtual Reality Game", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446229/13bd1fHrlRx", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699215", "title": "VIRTOOAIR: Virtual Reality TOOlbox for Avatar Intelligent Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699215/19F1Ug56qB2", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2021/3734/0/373400a123", "title": "Inverse kinematics for full-body self representation in VR-based cognitive rehabilitation", "doi": null, "abstractUrl": "/proceedings-article/ism/2021/373400a123/1A3j8qqoU8g", "parentPublication": { "id": "proceedings/ism/2021/3734/0", "title": "2021 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714051", "title": "Augmenting Immersive Telepresence Experience with a Virtual Body", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714051/1B0Y0I5xWyk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a818", "title": "Towards Controlling Whole Body Avatars with Partial Body-Tracking and Environmental Information", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a818/1CJeftFqI5W", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual 
Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a666", "title": "Investigating User Embodiment of Inverse-Kinematic Avatars in Smartphone Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a666/1JrR5i5jDhe", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798169", "title": "Stand-alone, Wearable System for Full Body VR Avatars: Towards Physics-based 3D Interaction", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798169/1cJ126EVaVi", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798044", "title": "Effect of Full Body Avatar in Augmented Reality Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798044/1cJ14GMFJdK", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090557", "title": "Martial Arts Training in Virtual Reality with Full-body Tracking and Physically Simulated Opponents", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090557/1jIxnRidIsM", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { 
"previous": { "fno": "08705364", "articleId": "19Jq5yQE0ZW", "__typename": "AdjacentArticleType" }, "next": { "fno": "08674573", "articleId": "18IltNVMVe8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "18IltNVMVe8", "doi": "10.1109/TVCG.2019.2907583", "abstract": "Building Information Modeling (BIM) employs data-rich 3D CAD models for large-scale facility design, construction, and operation. These complex datasets contain a large amount and variety of information, ranging from design specifications to real-time sensor data. They are used by architects and engineers for various analysis and simulations throughout a facility's life cycle. Many techniques from different visualization fields could be used to analyze these data. However, the BIM domain still remains largely unexplored by the visualization community. The goal of this article is to encourage visualization researchers to increase their involvement with BIM. To this end, we present the results of a systematic review of visualization in current BIM practice. We use a novel taxonomy to identify main application areas and analyze commonly employed techniques. From this domain characterization, we highlight future research opportunities brought forth by the unique features of BIM. For instance, exploring the synergies between scientific and information visualization to integrate spatial and non-spatial data. We hope this article raises awareness to interesting new challenges the BIM domain brings to the visualization community.", "abstracts": [ { "abstractType": "Regular", "content": "Building Information Modeling (BIM) employs data-rich 3D CAD models for large-scale facility design, construction, and operation. These complex datasets contain a large amount and variety of information, ranging from design specifications to real-time sensor data. 
They are used by architects and engineers for various analysis and simulations throughout a facility's life cycle. Many techniques from different visualization fields could be used to analyze these data. However, the BIM domain still remains largely unexplored by the visualization community. The goal of this article is to encourage visualization researchers to increase their involvement with BIM. To this end, we present the results of a systematic review of visualization in current BIM practice. We use a novel taxonomy to identify main application areas and analyze commonly employed techniques. From this domain characterization, we highlight future research opportunities brought forth by the unique features of BIM. For instance, exploring the synergies between scientific and information visualization to integrate spatial and non-spatial data. We hope this article raises awareness to interesting new challenges the BIM domain brings to the visualization community.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Building Information Modeling (BIM) employs data-rich 3D CAD models for large-scale facility design, construction, and operation. These complex datasets contain a large amount and variety of information, ranging from design specifications to real-time sensor data. They are used by architects and engineers for various analysis and simulations throughout a facility's life cycle. Many techniques from different visualization fields could be used to analyze these data. However, the BIM domain still remains largely unexplored by the visualization community. The goal of this article is to encourage visualization researchers to increase their involvement with BIM. To this end, we present the results of a systematic review of visualization in current BIM practice. We use a novel taxonomy to identify main application areas and analyze commonly employed techniques. 
From this domain characterization, we highlight future research opportunities brought forth by the unique features of BIM. For instance, exploring the synergies between scientific and information visualization to integrate spatial and non-spatial data. We hope this article raises awareness to interesting new challenges the BIM domain brings to the visualization community.", "title": "A Systematic Review of Visualization in Building Information Modeling", "normalizedTitle": "A Systematic Review of Visualization in Building Information Modeling", "fno": "08674573", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Building Management Systems", "Buildings Structures", "CAD", "Construction Industry", "Data Visualisation", "Information Management", "Real Time Systems", "Sensors", "Solid Modelling", "Structural Engineering Computing", "Design Specifications", "Real Time Sensor Data", "Visualization Community", "Building Information Modeling", "3 D CAD Models", "Complex Datasets", "Construction Process", "Visualization", "Solid Modeling", "Data Visualization", "Task Analysis", "Taxonomy", "Systematics", "Computational Modeling", "Visualization Techniques And Methodologies", "Information Visualization", "Computer Aided Design", "Building Information Modeling", "Survey" ], "authors": [ { "givenName": "Paulo", "surname": "Ivson", "fullName": "Paulo Ivson", "affiliation": "Tecgraf Institute/PUC-Rio, Brazil", "__typename": "ArticleAuthorType" }, { "givenName": "André", "surname": "Moreira", "fullName": "André Moreira", "affiliation": "Tecgraf Institute/PUC-Rio, Brazil", "__typename": "ArticleAuthorType" }, { "givenName": "Francisco", "surname": "Queiroz", "fullName": "Francisco Queiroz", "affiliation": "Tecgraf Institute/PUC-Rio, Brazil", "__typename": "ArticleAuthorType" }, { "givenName": "Wallas", "surname": "Santos", "fullName": "Wallas Santos", "affiliation": "IBM, Armonk, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Waldemar", "surname": "Celes", 
"fullName": "Waldemar Celes", "affiliation": "Tecgraf Institute/PUC-Rio, Brazil", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "3109-3127", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/fie/2015/8454/0/07344058", "title": "Building information modeling laboratory exercises in a construction science and management building systems course", "doi": null, "abstractUrl": "/proceedings-article/fie/2015/07344058/12OmNqESubo", "parentPublication": { "id": "proceedings/fie/2015/8454/0", "title": "2015 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csit/2016/8914/0/07549475", "title": "Software evolution visualization techniques and methods - a systematic review", "doi": null, "abstractUrl": "/proceedings-article/csit/2016/07549475/12OmNrYCXO2", "parentPublication": { "id": "proceedings/csit/2016/8914/0", "title": "2016 7th International Conference on Computer Science and Information Technology (CSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/re/2016/4121/0/4121a006", "title": "Requirements Engineering Visualization: A Systematic Literature Review", "doi": null, "abstractUrl": "/proceedings-article/re/2016/4121a006/12OmNyRxFzB", "parentPublication": { "id": "proceedings/re/2016/4121/0", "title": "2016 IEEE 24th International Requirements Engineering Conference (RE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2014/2874/0/2874a311", "title": "Visualization of Building Performance Simulation Results: State-of-the-Art and Future Directions", "doi": null, "abstractUrl": 
"/proceedings-article/pacificvis/2014/2874a311/12OmNzn393w", "parentPublication": { "id": "proceedings/pacificvis/2014/2874/0", "title": "2014 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122818", "title": "A Systematic Review on the Practice of Evaluating Visualization", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122818/13rRUwd9CLN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/04/mcg2017040103", "title": "Pathways for Theoretical Advances in Visualization", "doi": null, "abstractUrl": "/magazine/cg/2017/04/mcg2017040103/13rRUzpzeDD", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2022/9007/0/900700a048", "title": "A Systematic Literature Review of Solution-Space Visualization Approaches in the Context of Optimization Problems", "doi": null, "abstractUrl": "/proceedings-article/iv/2022/900700a048/1KaH0fxFs4M", "parentPublication": { "id": "proceedings/iv/2022/9007/0", "title": "2022 26th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispcem/2022/9271/0/927100a204", "title": "Research on Building BIM Intelligent Training System Based on Computer 3D Technology", "doi": null, "abstractUrl": "/proceedings-article/ispcem/2022/927100a204/1LHcUdTNOAU", "parentPublication": { "id": "proceedings/ispcem/2022/9271/0", "title": "2022 2nd International Signal Processing, Communications and Engineering Management Conference (ISPCEM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2022/01/09552846", "title": "A Critical Reflection on Visualization Research: Where Do Decision Making Tasks Hide?", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552846/1xibYOLsNc4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/conisoft/2021/4361/0/436100a034", "title": "Information Visualization In Adaptable Dashboards For Smart Cities: A Systematic Review", "doi": null, "abstractUrl": "/proceedings-article/conisoft/2021/436100a034/1zHIifIcW4w", "parentPublication": { "id": "proceedings/conisoft/2021/4361/0", "title": "2021 9th International Conference in Software Engineering Research and Innovation (CONISOFT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08695841", "articleId": "19sOOGyyt56", "__typename": "AdjacentArticleType" }, "next": { "fno": "08684314", "articleId": "1keqQTYCS64", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1mLIjxCRyO4", "name": "ttg202010-08674573s1-tvcg-ivson-2907583-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202010-08674573s1-tvcg-ivson-2907583-mm.zip", "extension": "zip", "size": "778 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1keqQTYCS64", "doi": "10.1109/TVCG.2019.2909881", "abstract": "As part of an evaluation process of user experience realism in a Virtual Reality (VR) system, we focus in this paper on one of the core characteristics of vision: the relationship between contrast and luminance. The experiment aims at validating in VR reaction time predictions given by Rea and Ouellette's model. The subjects have to distinguish, as fast as they can, a target object from an uniform background. Our results did not match the predictions of the model. Our subjects showed higher performance in performing the task than expected. At low level of contrast, our subjects could easily perceive a target they should not have been able to see at all. This is explained by the size of the visual field surrounding the target: at low level of visibility, the larger the surrounding, the easier perception the is. We conclude that the Rea and Ouellette's model could be applied in VR if a specific visual field size factor was added.", "abstracts": [ { "abstractType": "Regular", "content": "As part of an evaluation process of user experience realism in a Virtual Reality (VR) system, we focus in this paper on one of the core characteristics of vision: the relationship between contrast and luminance. The experiment aims at validating in VR reaction time predictions given by Rea and Ouellette's model. The subjects have to distinguish, as fast as they can, a target object from an uniform background. Our results did not match the predictions of the model. Our subjects showed higher performance in performing the task than expected. 
At low level of contrast, our subjects could easily perceive a target they should not have been able to see at all. This is explained by the size of the visual field surrounding the target: at low level of visibility, the larger the surrounding, the easier perception the is. We conclude that the Rea and Ouellette's model could be applied in VR if a specific visual field size factor was added.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As part of an evaluation process of user experience realism in a Virtual Reality (VR) system, we focus in this paper on one of the core characteristics of vision: the relationship between contrast and luminance. The experiment aims at validating in VR reaction time predictions given by Rea and Ouellette's model. The subjects have to distinguish, as fast as they can, a target object from an uniform background. Our results did not match the predictions of the model. Our subjects showed higher performance in performing the task than expected. At low level of contrast, our subjects could easily perceive a target they should not have been able to see at all. This is explained by the size of the visual field surrounding the target: at low level of visibility, the larger the surrounding, the easier perception the is. 
We conclude that the Rea and Ouellette's model could be applied in VR if a specific visual field size factor was added.", "title": "Application of a Relative Visual Performance Model in a Virtual Reality Immersive System", "normalizedTitle": "Application of a Relative Visual Performance Model in a Virtual Reality Immersive System", "fno": "08684314", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Vision", "Object Detection", "Virtual Reality", "Visual Perception", "Ouellettes Model", "Relative Visual Performance Model", "Virtual Reality Immersive System", "Evaluation Process", "User Experience Realism", "Luminance", "VR Reaction Time Predictions", "Target Object", "Visual Field Size Factor", "Rea Model", "Solid Modeling", "Computational Modeling", "Image Color Analysis", "Task Analysis", "Face", "Visualization", "Virtual Reality", "Contrast", "Luminance", "Visual Performance", "Virtual Reality" ], "authors": [ { "givenName": "Benoit", "surname": "Perroud", "fullName": "Benoit Perroud", "affiliation": "Le2i (FRE 2005) Lab, Arts et Métiers, CNRS, Institut Image, 2 rue T. Dumorey, Univ. Bourgogne Franche-Comté, HeSam, Chalon-sur-Saone, France", "__typename": "ArticleAuthorType" }, { "givenName": "Stéphane", "surname": "Régnier", "fullName": "Stéphane Régnier", "affiliation": "VR and Immersive Simulation Center, Renault, Guyancourt, France", "__typename": "ArticleAuthorType" }, { "givenName": "Andras", "surname": "Kemeny", "fullName": "Andras Kemeny", "affiliation": "Le2i (FRE 2005) Lab, Arts et Métiers, CNRS, Institut Image, 2 rue T. Dumorey, Univ. Bourgogne Franche-Comté, HeSam, Chalon-sur-Saone, France", "__typename": "ArticleAuthorType" }, { "givenName": "Frédéric", "surname": "Mérienne", "fullName": "Frédéric Mérienne", "affiliation": "Le2i (FRE 2005) lab, Arts et Métiers, CNRS, HeSam, Institut Image, 2 rue T. Dumorey, Univ. 
Bourgogne Franche-Comté, Chalon-sur-Saone, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "3128-3132", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/itag/2014/6795/0/6795a051", "title": "Immersive Virtual Reality Deployment in a Lean Manufacturing Environment", "doi": null, "abstractUrl": "/proceedings-article/itag/2014/6795a051/12OmNqBKU2C", "parentPublication": { "id": "proceedings/itag/2014/6795/0", "title": "2014 International Conference on Interactive Technologies and Games (iTAG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isuvr/2017/3091/0/3091a020", "title": "Visual Representation of Gesture Interaction Feedback in Virtual Reality Games", "doi": null, "abstractUrl": "/proceedings-article/isuvr/2017/3091a020/12OmNx5Yviz", "parentPublication": { "id": "proceedings/isuvr/2017/3091/0", "title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisa/2012/1402/0/06220974", "title": "Embedded Virtual Reality for Travel Security", "doi": null, "abstractUrl": "/proceedings-article/icisa/2012/06220974/12OmNyNzhx5", "parentPublication": { "id": "proceedings/icisa/2012/1402/0", "title": "2012 International Conference on Information Science and Applications (ICISA 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446466", "title": "Predicting Performance During a Dynamic Target Acquisition Task in Immersive Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446466/13bd1fdV4lE", "parentPublication": { 
"id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2016/02/mcg2016020036", "title": "Using Immersive Virtual Reality to Reduce Work Accidents in Developing Countries", "doi": null, "abstractUrl": "/magazine/cg/2016/02/mcg2016020036/13rRUzpzeF4", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998292", "title": "Immersive Process Model Exploration in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998292/1hpPCy1gJoI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a633", "title": "Immersive Authoring of Virtual Reality Training", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a633/1tnXNG6t1x6", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/transai/2021/3412/0/341200a017", "title": "An Immersive Model of User Trust in Conversational Agents in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/transai/2021/341200a017/1xNNxZKbIzK", "parentPublication": { "id": "proceedings/transai/2021/3412/0", "title": "2021 Third International Conference on Transdisciplinary AI (TransAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09664291", "title": "EHTask: Recognizing User Tasks From Eye and Head Movements in Immersive Virtual Reality", "doi": null, "abstractUrl": 
"/journal/tg/2023/04/09664291/1zHDIPIlNBe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/06/09646533", "title": "Leveraging Human Visual Perception for an Optimized Virtual Reality Experience", "doi": null, "abstractUrl": "/magazine/cg/2021/06/09646533/1zdLEz8z0ac", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08674573", "articleId": "18IltNVMVe8", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxjQybN", "doi": "10.1109/TVCG.2008.192", "abstract": "We present a new algorithm for finding a most \"developable\" smooth mesh surface to interpolate a given set of arbitrary points or space curves. Inspired by the recent progress in mesh editing that employs the concepts of preserving the Laplacian coordinates and handle-based shape editing, we formulate the interpolation problem as a mesh deformation process that transforms an initial developable mesh surface, such as a planar figure, to a final mesh surface that interpolates the given points and/or curves. During the deformation, the developability of the intermediate mesh is maintained by means of preserving the zero-valued Gaussian curvature on the mesh. To treat the high nonlinearity of the geometric constrains owing to the preservation of Gaussian curvature, we linearize those nonlinear constraints using Taylor expansion and eventually construct a sparse and over-determined linear system which is subsequently solved by a robust least-squares solution. By iteratively performing this procedure, the initial mesh is gradually and smoothly \"dragged\" to the given points and/or curves. The initial experimental data has shown some promising aspects of the proposed algorithm as a general quasi-developable surface interpolation tool.", "abstracts": [ { "abstractType": "Regular", "content": "We present a new algorithm for finding a most \"developable\" smooth mesh surface to interpolate a given set of arbitrary points or space curves. 
Inspired by the recent progress in mesh editing that employs the concepts of preserving the Laplacian coordinates and handle-based shape editing, we formulate the interpolation problem as a mesh deformation process that transforms an initial developable mesh surface, such as a planar figure, to a final mesh surface that interpolates the given points and/or curves. During the deformation, the developability of the intermediate mesh is maintained by means of preserving the zero-valued Gaussian curvature on the mesh. To treat the high nonlinearity of the geometric constrains owing to the preservation of Gaussian curvature, we linearize those nonlinear constraints using Taylor expansion and eventually construct a sparse and over-determined linear system which is subsequently solved by a robust least-squares solution. By iteratively performing this procedure, the initial mesh is gradually and smoothly \"dragged\" to the given points and/or curves. The initial experimental data has shown some promising aspects of the proposed algorithm as a general quasi-developable surface interpolation tool.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a new algorithm for finding a most \"developable\" smooth mesh surface to interpolate a given set of arbitrary points or space curves. Inspired by the recent progress in mesh editing that employs the concepts of preserving the Laplacian coordinates and handle-based shape editing, we formulate the interpolation problem as a mesh deformation process that transforms an initial developable mesh surface, such as a planar figure, to a final mesh surface that interpolates the given points and/or curves. During the deformation, the developability of the intermediate mesh is maintained by means of preserving the zero-valued Gaussian curvature on the mesh. 
To treat the high nonlinearity of the geometric constrains owing to the preservation of Gaussian curvature, we linearize those nonlinear constraints using Taylor expansion and eventually construct a sparse and over-determined linear system which is subsequently solved by a robust least-squares solution. By iteratively performing this procedure, the initial mesh is gradually and smoothly \"dragged\" to the given points and/or curves. The initial experimental data has shown some promising aspects of the proposed algorithm as a general quasi-developable surface interpolation tool.", "title": "Quasi-Developable Mesh Surface Interpolation via Mesh Deformation", "normalizedTitle": "Quasi-Developable Mesh Surface Interpolation via Mesh Deformation", "fno": "ttg2009030518", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Interpolation", "Laplace Equations", "Iterative Algorithms", "Surface Treatment", "Robustness", "Clothing", "Costs", "Minimization Methods", "Developable Surface", "Least Squares Methods", "Computer Aided Design", "Surface Fitting" ], "authors": [ { "givenName": null, "surname": "Kai Tang", "fullName": "Kai Tang", "affiliation": "Mech. Eng. Dept., Hong Kong Univ. of Sci. & Technol., Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Ming Chen", "fullName": "Ming Chen", "affiliation": "Mech. Eng. Dept., Hong Kong Univ. of Sci. 
& Technol., Hong Kong", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "1-1", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/isdea/2012/4608/0/4608a129", "title": "A Novel Surface Flattening Method Based on Mesh Edges", "doi": null, "abstractUrl": "/proceedings-article/isdea/2012/4608a129/12OmNBNM8Pa", "parentPublication": { "id": "proceedings/isdea/2012/4608/0", "title": "2012 Second International Conference on Intelligent System Design and Engineering Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1988/0852/0/00012342", "title": "Matching of developable surfaces", "doi": null, "abstractUrl": "/proceedings-article/robot/1988/00012342/12OmNwxlrgR", "parentPublication": { "id": "proceedings/robot/1988/0852/0", "title": "Proceedings. 
1988 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icic/2011/688/0/05954507", "title": "Surface Construction Based on Quadrilateral Mesh", "doi": null, "abstractUrl": "/proceedings-article/icic/2011/05954507/12OmNy5R3tw", "parentPublication": { "id": "proceedings/icic/2011/688/0", "title": "2011 Fourth International Conference on Information and Computing (ICIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a197", "title": "Estimation of Non-rigid Surface Deformation Using Developable Surface Model", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a197/12OmNy6qfPR", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2013/2576/0/06815055", "title": "Scalable Mesh Deformation with Controllable Stiffness", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2013/06815055/12OmNyS6RAH", "parentPublication": { "id": "proceedings/cad-graphics/2013/2576/0", "title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2009/3725/0/3725a226", "title": "Locally Developable Constraint for Document Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/icdar/2009/3725a226/12OmNyrIaCe", "parentPublication": { "id": "proceedings/icdar/2009/3725/0", "title": "2009 10th International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smi/2008/2260/0/04547959", "title": "A least-norm approach to flattenable mesh surface 
processing", "doi": null, "abstractUrl": "/proceedings-article/smi/2008/04547959/12OmNz3bdDN", "parentPublication": { "id": "proceedings/smi/2008/2260/0", "title": "IEEE International Conference on Shape Modeling and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2007/2909/3/290930210", "title": "Geometric Design of Developable Bezier and B-spline Parametric Surfaces", "doi": null, "abstractUrl": "/proceedings-article/snpd/2007/290930210/12OmNz4SOBm", "parentPublication": { "id": "proceedings/snpd/2007/2909/3", "title": "Eighth ACIS International Conference on Software Engineering, Artificial Intelligence, Networking, and Parallel/Distributed Computing (SNPD 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/01/ttg2008010025", "title": "Computing Length-Preserved Free Boundary for Quasi-Developable Mesh Segmentation", "doi": null, "abstractUrl": "/journal/tg/2008/01/ttg2008010025/13rRUxBrGgP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-smartcity-dss/2017/2588/0/08291962", "title": "Integrated Quality Mesh Generation for Poisson Surface Reconstruction in HPC Applications", "doi": null, "abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2017/08291962/17D45VsBTYE", "parentPublication": { "id": "proceedings/hpcc-smartcity-dss/2017/2588/0", "title": "2017 IEEE 19th International Conference on High Performance Computing and Communications; IEEE 15th International Conference on Smart City; IEEE 3rd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "ttg2009030353", "articleId": "13rRUwfI0Q2", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwfI0Q2", "doi": "10.1109/TVCG.2009.37", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "title": "Guest Editor's Introduction: Special Section on the IEEE Virtual Reality Conference (VR)", "normalizedTitle": "Guest Editor's Introduction: Special Section on the IEEE Virtual Reality Conference (VR)", "fno": "ttg2009030353", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Ming C.", "surname": "Lin", "fullName": "Ming C. Lin", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Anthony", "surname": "Steed", "fullName": "Anthony Steed", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Carolina", "surname": "Cruz-Neira", "fullName": "Carolina Cruz-Neira", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "353-354", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2009/01/ttg2009010004", "title": "Guest Editor's Introduction: Special Section on VRST", "doi": null, "abstractUrl": "/journal/tg/2009/01/ttg2009010004/13rRUwI5TQR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2000/02/v0097", "title": "Guest Editor's Introduction: Special 
Section on Visualization", "doi": null, "abstractUrl": "/journal/tg/2000/02/v0097/13rRUwI5U7Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/01/ttg2011010001", "title": "Guest Editor's Introduction Special Section on the Virtual Reality Conference (VR)", "doi": null, "abstractUrl": "/journal/tg/2011/01/ttg2011010001/13rRUwIF6l4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/02/v0113", "title": "Guest Editor's Introduction: Special Section on IEEE Visualization", "doi": null, "abstractUrl": "/journal/tg/2003/02/v0113/13rRUx0geuZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040725", "title": "Guest Editor's Introduction: Special Section on EuroVis", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040725/13rRUxASuAq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030485", "title": "Guest Editor's Introduction: Special Section on Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030485/13rRUxDItha", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/1996/04/k0513", "title": "GUEST EDITOR'S INTRODUCTION: Special Section on Digital Libraries", "doi": null, "abstractUrl": "/journal/tk/1996/04/k0513/13rRUy0qnLO", 
"parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1999/02/00773802", "title": "Guest Editor's introduction: special section on visualization", "doi": null, "abstractUrl": "/journal/tg/1999/02/00773802/13rRUyfKIHw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/1999/04/e0435", "title": "Guest Editor's Special Section Introduction", "doi": null, "abstractUrl": "/journal/ts/1999/04/e0435/13rRUygT7gS", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030518", "articleId": "13rRUxjQybN", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2009030355", "articleId": "13rRUwdrdSt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwdrdSt", "doi": "10.1109/TVCG.2008.190", "abstract": "We describe a novel markerless camera tracking approach and user interaction methodology for augmented reality (AR) on unprepared tabletop environments. We propose a real-time system architecture that combines two types of feature tracking. Distinctive image features of the scene are detected and tracked frame-to-frame by computing optical flow. In order to achieve real-time performance, multiple operations are processed in a synchronized multi-threaded manner: capturing a video frame, tracking features using optical flow, detecting distinctive invariant features, and rendering an output frame. We also introduce user interaction methodology for establishing a global coordinate system and for placing virtual objects in the AR environment by tracking a user's outstretched hand and estimating a camera pose relative to it. We evaluate the speed and accuracy of our hybrid feature tracking approach, and demonstrate a proof-of-concept application for enabling AR in unprepared tabletop environments, using bare hands for interaction.", "abstracts": [ { "abstractType": "Regular", "content": "We describe a novel markerless camera tracking approach and user interaction methodology for augmented reality (AR) on unprepared tabletop environments. We propose a real-time system architecture that combines two types of feature tracking. Distinctive image features of the scene are detected and tracked frame-to-frame by computing optical flow. 
In order to achieve real-time performance, multiple operations are processed in a synchronized multi-threaded manner: capturing a video frame, tracking features using optical flow, detecting distinctive invariant features, and rendering an output frame. We also introduce user interaction methodology for establishing a global coordinate system and for placing virtual objects in the AR environment by tracking a user's outstretched hand and estimating a camera pose relative to it. We evaluate the speed and accuracy of our hybrid feature tracking approach, and demonstrate a proof-of-concept application for enabling AR in unprepared tabletop environments, using bare hands for interaction.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We describe a novel markerless camera tracking approach and user interaction methodology for augmented reality (AR) on unprepared tabletop environments. We propose a real-time system architecture that combines two types of feature tracking. Distinctive image features of the scene are detected and tracked frame-to-frame by computing optical flow. In order to achieve real-time performance, multiple operations are processed in a synchronized multi-threaded manner: capturing a video frame, tracking features using optical flow, detecting distinctive invariant features, and rendering an output frame. We also introduce user interaction methodology for establishing a global coordinate system and for placing virtual objects in the AR environment by tracking a user's outstretched hand and estimating a camera pose relative to it. 
We evaluate the speed and accuracy of our hybrid feature tracking approach, and demonstrate a proof-of-concept application for enabling AR in unprepared tabletop environments, using bare hands for interaction.", "title": "Multithreaded Hybrid Feature Tracking for Markerless Augmented Reality", "normalizedTitle": "Multithreaded Hybrid Feature Tracking for Markerless Augmented Reality", "fno": "ttg2009030355", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Virtual Reality", "Scene Analysis" ], "authors": [ { "givenName": "Taehee", "surname": "Lee", "fullName": "Taehee Lee", "affiliation": "University of California, Los Angeles, Los Angeles", "__typename": "ArticleAuthorType" }, { "givenName": "Tobias", "surname": "Höllerer", "fullName": "Tobias Höllerer", "affiliation": "University of California, Santa Barbara, Santa Barbara", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "355-368", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2005/8929/0/01492784", "title": "Dynamic Texturing of Real Objects in an Augmented Reality System", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492784/12OmNAnuTkI", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223348", "title": "A building-wide indoor tracking system for augmented reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223348/12OmNBQkx3d", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iwar/1999/0359/0/03590003", "title": "Vision-Based Pose Computation: Robust and Accurate Augmented Reality Tracking", "doi": null, "abstractUrl": "/proceedings-article/iwar/1999/03590003/12OmNBuL1fO", "parentPublication": { "id": "proceedings/iwar/1999/0359/0", "title": "Augmented Reality, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2007/1749/0/04538856", "title": "Initializing Markerless Tracking Using a Simple Hand Gesture", "doi": null, "abstractUrl": "/proceedings-article/ismar/2007/04538856/12OmNCga1Th", "parentPublication": { "id": "proceedings/ismar/2007/1749/0", "title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2011/0039/0/05759460", "title": "Mobile Augmented Reality at the Hollywood Walk of Fame", "doi": null, "abstractUrl": "/proceedings-article/vr/2011/05759460/12OmNrIaea8", "parentPublication": { "id": "proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2013/4983/0/4983a226", "title": "A Markerless Augmented Reality System for Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/crv/2013/4983a226/12OmNrNh0u8", "parentPublication": { "id": "proceedings/crv/2013/4983/0", "title": "2013 International Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2005/8929/0/01492781", "title": "Dynamic texturing of real objects in an augmented reality system", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492781/12OmNwkhTh2", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vr/2008/1971/0/04480766", "title": "Hybrid Feature Tracking and User Interaction for Markerless Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480766/12OmNyaGeH3", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2007/1452/0/04373785", "title": "Handy AR: Markerless Inspection of Augmented Reality Objects Using Fingertip Tracking", "doi": null, "abstractUrl": "/proceedings-article/iswc/2007/04373785/12OmNzxPTMb", "parentPublication": { "id": "proceedings/iswc/2007/1452/0", "title": "2007 11th IEEE International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/10/ttg2011101369", "title": "Shape Recognition and Pose Estimation for Mobile Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2011/10/ttg2011101369/13rRUyft7D0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030353", "articleId": "13rRUwfI0Q2", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2009030369", "articleId": "13rRUxly8SR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxly8SR", "doi": "10.1109/TVCG.2008.195", "abstract": "This paper presents Mixed Reality Humans (MRHs), a new type of embodied agent enabling touch-driven communication. Affording touch between human and agent allows MRHs to simulate interpersonal scenarios in which touch is crucial. Two studies provide initial evaluation of user behavior with a MRH patient and the usability and acceptability of a MRH patient for practice and evaluation of medical students' clinical skills. In Study I (n=8) it was observed that students treated MRHs as social actors more than students in prior interactions with virtual human patients (n=27), and used interpersonal touch to comfort and reassure the MRH patient similarly to prior interactions with human patients (n=76). In the within-subjects Study II (n=11), medical students performed a clinical breast exam on each of a MRH and human patient. Participants performed equivalent exams with the MRH and human patients, demonstrating the usability of MRHs to evaluate students' exam skills. The acceptability of the MRH patient for practicing exam skills was high as students rated the experience as believable and educationally beneficial. Acceptability was improved from Study I to Study II due to an increase in the MRH's visual realism, demonstrating that visual realism is critical for simulation of specific interpersonal scenarios.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents Mixed Reality Humans (MRHs), a new type of embodied agent enabling touch-driven communication. Affording touch between human and agent allows MRHs to simulate interpersonal scenarios in which touch is crucial. 
Two studies provide initial evaluation of user behavior with a MRH patient and the usability and acceptability of a MRH patient for practice and evaluation of medical students' clinical skills. In Study I (n=8) it was observed that students treated MRHs as social actors more than students in prior interactions with virtual human patients (n=27), and used interpersonal touch to comfort and reassure the MRH patient similarly to prior interactions with human patients (n=76). In the within-subjects Study II (n=11), medical students performed a clinical breast exam on each of a MRH and human patient. Participants performed equivalent exams with the MRH and human patients, demonstrating the usability of MRHs to evaluate students' exam skills. The acceptability of the MRH patient for practicing exam skills was high as students rated the experience as believable and educationally beneficial. Acceptability was improved from Study I to Study II due to an increase in the MRH's visual realism, demonstrating that visual realism is critical for simulation of specific interpersonal scenarios.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents Mixed Reality Humans (MRHs), a new type of embodied agent enabling touch-driven communication. Affording touch between human and agent allows MRHs to simulate interpersonal scenarios in which touch is crucial. Two studies provide initial evaluation of user behavior with a MRH patient and the usability and acceptability of a MRH patient for practice and evaluation of medical students' clinical skills. In Study I (n=8) it was observed that students treated MRHs as social actors more than students in prior interactions with virtual human patients (n=27), and used interpersonal touch to comfort and reassure the MRH patient similarly to prior interactions with human patients (n=76). In the within-subjects Study II (n=11), medical students performed a clinical breast exam on each of a MRH and human patient. 
Participants performed equivalent exams with the MRH and human patients, demonstrating the usability of MRHs to evaluate students' exam skills. The acceptability of the MRH patient for practicing exam skills was high as students rated the experience as believable and educationally beneficial. Acceptability was improved from Study I to Study II due to an increase in the MRH's visual realism, demonstrating that visual realism is critical for simulation of specific interpersonal scenarios.", "title": "Mixed Reality Humans: Evaluating Behavior, Usability, and Acceptability", "normalizedTitle": "Mixed Reality Humans: Evaluating Behavior, Usability, and Acceptability", "fno": "ttg2009030369", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Intelligent Agents", "Virtual Reality", "Life And Medical Sciences", "Artificial", "Augmented", "And Virtual Realities" ], "authors": [ { "givenName": "Aaron", "surname": "Kotranza", "fullName": "Aaron Kotranza", "affiliation": "University of Florida, Gainesville", "__typename": "ArticleAuthorType" }, { "givenName": "Benjamin", "surname": "Lok", "fullName": "Benjamin Lok", "affiliation": "University of Florida, Gainesville", "__typename": "ArticleAuthorType" }, { "givenName": "Adeline", "surname": "Deladisma", "fullName": "Adeline Deladisma", "affiliation": "Medical College of Georgia, Augusta", "__typename": "ArticleAuthorType" }, { "givenName": "Carla M.", "surname": "Pugh", "fullName": "Carla M. Pugh", "affiliation": "Northwestern University, Chicago", "__typename": "ArticleAuthorType" }, { "givenName": "D. Scott", "surname": "Lind", "fullName": "D. 
Scott Lind", "affiliation": "Medical College of Georgia, Augusta", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "369-382", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/1998/9176/0/91760431", "title": "A Case Study Using the Virtual Environment for Reconstructive Surgery", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1998/91760431/12OmNB9KHtv", "parentPublication": { "id": "proceedings/ieee-vis/1998/9176/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2017/5812/0/08055811", "title": "Learning mechanical engineering in a virtual workshop: A preliminary study on utilisability, utility and acceptability", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2017/08055811/12OmNCcKQOB", "parentPublication": { "id": "proceedings/vs-games/2017/5812/0", "title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2008/1971/0/04480757", "title": "Virtual Human + Tangible Interface = Mixed Reality Human An Initial Exploration with a Virtual Breast Exam Patient", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480757/12OmNrJAdRV", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811019", "title": "Virtual Humans That Touch Back: Enhancing Nonverbal Communication with Virtual Humans through Bidirectional Touch", "doi": null, 
"abstractUrl": "/proceedings-article/vr/2009/04811019/12OmNwMXnsz", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2006/0224/0/02240245", "title": "Mixed Reality Tabletop (MRT): A Low-Cost Teleconferencing Framework for Mixed-Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/vr/2006/02240245/12OmNxUMHnc", "parentPublication": { "id": "proceedings/vr/2006/0224/0", "title": "IEEE Virtual Reality Conference (VR 2006)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2009/4715/0/05350690", "title": "Work in progress - digital school desk", "doi": null, "abstractUrl": "/proceedings-article/fie/2009/05350690/12OmNzuZUBr", "parentPublication": { "id": "proceedings/fie/2009/4715/0", "title": "2009 39th IEEE Frontiers in Education Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/04/ttg2013040539", "title": "Applying Mixed Reality to Simulate Vulnerable Populations for Practicing Clinical Communication Skills", "doi": null, "abstractUrl": "/journal/tg/2013/04/ttg2013040539/13rRUwInvl1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2012/01/tth2012010066", "title": "Perceptually Augmented Simulator Design", "doi": null, "abstractUrl": "/journal/th/2012/01/tth2012010066/13rRUygT7fm", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2009/03/tth2009030136", "title": "The Virtual Midas Touch: Helping Behavior After a Mediated Social Touch", "doi": null, "abstractUrl": 
"/journal/th/2009/03/tth2009030136/13rRUygT7n5", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090535", "title": "Usability of a Foreign Body Object Scenario in VR for Nursing Education", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090535/1jIxzfEJiSI", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030355", "articleId": "13rRUwdrdSt", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2009030383", "articleId": "13rRUIJcWlg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRX0", "name": "ttg2009030369s.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2009030369s.zip", "extension": "zip", "size": "5.56 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUIJcWlg", "doi": "10.1109/TVCG.2008.191", "abstract": "Vir tual Environments (VEs) that use a real-walking locomotion interface have typically been restricted in size to the area of the tracked lab space. Techniques proposed to lift this size constraint, enabling real walking in VEs that are larger than the tracked lab space, all require reorientation techniques (ROTs) in the worst-case situation—when a user is close to walking out of the tracked space. We propose a new ROT using visual and audial distractors—objects in the VE that the user focuses on while the VE rotates—and compare our method to current ROTs through three user studies. ROTs using distractors were preferred and ranked more natural by users. Our findings also suggest that improving visual realism and adding sound increased a user's feeling of presence. Users were also less aware of the rotating VE when ROTs with distractors were used.", "abstracts": [ { "abstractType": "Regular", "content": "Vir tual Environments (VEs) that use a real-walking locomotion interface have typically been restricted in size to the area of the tracked lab space. Techniques proposed to lift this size constraint, enabling real walking in VEs that are larger than the tracked lab space, all require reorientation techniques (ROTs) in the worst-case situation—when a user is close to walking out of the tracked space. We propose a new ROT using visual and audial distractors—objects in the VE that the user focuses on while the VE rotates—and compare our method to current ROTs through three user studies. ROTs using distractors were preferred and ranked more natural by users. 
Our findings also suggest that improving visual realism and adding sound increased a user's feeling of presence. Users were also less aware of the rotating VE when ROTs with distractors were used.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual Environments (VEs) that use a real-walking locomotion interface have typically been restricted in size to the area of the tracked lab space. Techniques proposed to lift this size constraint, enabling real walking in VEs that are larger than the tracked lab space, all require reorientation techniques (ROTs) in the worst-case situation—when a user is close to walking out of the tracked space. We propose a new ROT using visual and audial distractors—objects in the VE that the user focuses on while the VE rotates—and compare our method to current ROTs through three user studies. ROTs using distractors were preferred and ranked more natural by users. Our findings also suggest that improving visual realism and adding sound increased a user's feeling of presence. Users were also less aware of the rotating VE when ROTs with distractors were used.", "title": "Evaluation of Reorientation Techniques and Distractors for Walking in Large Virtual Environments", "normalizedTitle": "Evaluation of Reorientation Techniques and Distractors for Walking in Large Virtual Environments", "fno": "ttg2009030383", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Graphics", "Virtual Reality" ], "authors": [ { "givenName": "Tabitha C.", "surname": "Peck", "fullName": "Tabitha C. Peck", "affiliation": "University of North Carolina, Chapel Hill, Chapel Hill", "__typename": "ArticleAuthorType" }, { "givenName": "Henry", "surname": "Fuchs", "fullName": "Henry Fuchs", "affiliation": "University of North Carolina, Chapel Hill, Chapel Hill", "__typename": "ArticleAuthorType" }, { "givenName": "Mary C.", "surname": "Whitton", "fullName": "Mary C. 
Whitton", "affiliation": "University of North Carolina, Chapel Hill, Chapel Hill", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "383-394", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2017/6716/0/07893327", "title": "Bookshelf and Bird: Enabling real walking in large VR spaces", "doi": null, "abstractUrl": "/proceedings-article/3dui/2017/07893327/12OmNA1Vnth", "parentPublication": { "id": "proceedings/3dui/2017/6716/0", "title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2008/1971/0/04480761", "title": "Evaluation of Reorientation Techniques for Walking in Large Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480761/12OmNB06l1A", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444816", "title": "Improved Redirection with Distractors: A large-scale-real-walking locomotion interface and its effect on navigation in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444816/12OmNqBbHKZ", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2011/0039/0/05759437", "title": "An evaluation of navigational ability comparing Redirected Free Exploration with Distractors to Walking-in-Place and joystick locomotio interfaces", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2011/05759437/12OmNx8OuyK", "parentPublication": { "id": "proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223357", "title": "Towards context-sensitive reorientation for real walking in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223357/12OmNzE54AN", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2008/2047/0/04476610", "title": "Poster: Generic Redirected Walking & Dynamic Passive Haptics: Evaluation and Implications for Virtual Locomotion Interfaces", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476610/12OmNzlD9i9", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/07/06109251", "title": "The Design and Evaluation of a Large-Scale Real-Walking Locomotion Interface", "doi": null, "abstractUrl": "/journal/tg/2012/07/06109251/13rRUygT7mV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0/217400a349", "title": "A Redirected Walking Toolkit for Exploring Large-Scale Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2021/217400a349/1BLnzoFxHHy", "parentPublication": { "id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0", "title": "2021 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big 
Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798117", "title": "Estimation of Rotation Gain Thresholds for Redirected Walking Considering FOV and Gender", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798117/1cJ1fo5PwqY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794563", "title": "Estimation of Rotation Gain Thresholds Considering FOV, Gender, and Distractors", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794563/1dNHkjixhDi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030369", "articleId": "13rRUxly8SR", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2009030395", "articleId": "13rRUwIF6dI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwIF6dI", "doi": "10.1109/TVCG.2008.198", "abstract": "Being a tool that assigns optical parameters used in interactive visualization, Transfer Functions (TF) have important effects on the quality of volume rendered medical images. Unfortunately, finding accurate TFs is a tedious and time consuming task because of the trade off between using extensive search spaces and fulfilling the physician's expectations with interactive data exploration tools and interfaces. By addressing this problem, we introduce a semi-automatic method for initial generation of TFs. The proposed method uses a Self Generating Hierarchical Radial Basis Function Network to determine the lobes of a Volume Histogram Stack (VHS) which is introduced as a new domain by aligning the histograms of slices of a image series. The new self generating hierarchical design strategy allows the recognition of suppressed lobes corresponding to suppressed tissues and representation of the overlapping regions which are parts of the lobes but can not be represented by the Gaussian bases in VHS. Moreover, approximation with a minimum set of basis functions provides the possibility of selecting and adjusting suitable units to optimize the TF. Applications on different CT/MR data sets show enhanced rendering quality and reduced optimization time in abdominal studies.", "abstracts": [ { "abstractType": "Regular", "content": "Being a tool that assigns optical parameters used in interactive visualization, Transfer Functions (TF) have important effects on the quality of volume rendered medical images. 
Unfortunately, finding accurate TFs is a tedious and time consuming task because of the trade off between using extensive search spaces and fulfilling the physician's expectations with interactive data exploration tools and interfaces. By addressing this problem, we introduce a semi-automatic method for initial generation of TFs. The proposed method uses a Self Generating Hierarchical Radial Basis Function Network to determine the lobes of a Volume Histogram Stack (VHS) which is introduced as a new domain by aligning the histograms of slices of a image series. The new self generating hierarchical design strategy allows the recognition of suppressed lobes corresponding to suppressed tissues and representation of the overlapping regions which are parts of the lobes but can not be represented by the Gaussian bases in VHS. Moreover, approximation with a minimum set of basis functions provides the possibility of selecting and adjusting suitable units to optimize the TF. Applications on different CT/MR data sets show enhanced rendering quality and reduced optimization time in abdominal studies.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Being a tool that assigns optical parameters used in interactive visualization, Transfer Functions (TF) have important effects on the quality of volume rendered medical images. Unfortunately, finding accurate TFs is a tedious and time consuming task because of the trade off between using extensive search spaces and fulfilling the physician's expectations with interactive data exploration tools and interfaces. By addressing this problem, we introduce a semi-automatic method for initial generation of TFs. The proposed method uses a Self Generating Hierarchical Radial Basis Function Network to determine the lobes of a Volume Histogram Stack (VHS) which is introduced as a new domain by aligning the histograms of slices of a image series. 
The new self generating hierarchical design strategy allows the recognition of suppressed lobes corresponding to suppressed tissues and representation of the overlapping regions which are parts of the lobes but can not be represented by the Gaussian bases in VHS. Moreover, approximation with a minimum set of basis functions provides the possibility of selecting and adjusting suitable units to optimize the TF. Applications on different CT/MR data sets show enhanced rendering quality and reduced optimization time in abdominal studies.", "title": "Semiautomatic Transfer Function Initialization for Abdominal Visualization Using Self-Generating Hierarchical Radial Basis Function Networks", "normalizedTitle": "Semiautomatic Transfer Function Initialization for Abdominal Visualization Using Self-Generating Hierarchical Radial Basis Function Networks", "fno": "ttg2009030395", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Volume Visualization", "Transfer Function Design", "Medical Image", "Hierarchical Radial Basis Function Networks", "Multiscale Analysis", "Volume Histogram Stack" ], "authors": [ { "givenName": "M. Alper", "surname": "Selver", "fullName": "M. 
Alper Selver", "affiliation": "Dokuz Eylül University, Izmir", "__typename": "ArticleAuthorType" }, { "givenName": "Cüneyt", "surname": "Güzeliş", "fullName": "Cüneyt Güzeliş", "affiliation": "Dokuz Eylül University, Izmir", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "395-409", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icnc/2008/3304/2/3304b116", "title": "Polynomial Radial Base Function: A New Local Similarity Measure", "doi": null, "abstractUrl": "/proceedings-article/icnc/2008/3304b116/12OmNBBhN93", "parentPublication": { "id": "proceedings/icnc/2008/3304/2", "title": "2008 Fourth International Conference on Natural Computation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/apwc-on-cse/2014/1955/0/07053868", "title": "Growing radial basis function network models", "doi": null, "abstractUrl": "/proceedings-article/apwc-on-cse/2014/07053868/12OmNBLdKDY", "parentPublication": { "id": "proceedings/apwc-on-cse/2014/1955/0", "title": "2014 Asia-Pacific World Congress on Computer Science and Engineering (APWC on CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fgcns/2008/3546/3/3546c193", "title": "Supervised Learning Errors by Radial Basis Function Neural Networks and Regularization Networks", "doi": null, "abstractUrl": "/proceedings-article/fgcns/2008/3546c193/12OmNvjgWGD", "parentPublication": { "id": "proceedings/fgcns/2008/3546/3", "title": "Future Generation Communication and Networking Symposia, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cerma/2010/4204/0/4204a383", "title": 
"Radial Basis Function for Visual Image Retrieval", "doi": null, "abstractUrl": "/proceedings-article/cerma/2010/4204a383/12OmNwtEELE", "parentPublication": { "id": "proceedings/cerma/2010/4204/0", "title": "Electronics, Robotics and Automotive Mechanics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2011/4455/0/4455a373", "title": "Design and Implementation of Fishery Forecasting System Based on Radial Basis Function Neural Network", "doi": null, "abstractUrl": "/proceedings-article/icdma/2011/4455a373/12OmNxVDuS2", "parentPublication": { "id": "proceedings/icdma/2011/4455/0", "title": "2011 Second International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ihmsc/2009/3752/1/3752a280", "title": "Fuzzy Neural Network Blind Equalization Algorithm Based on Radial Basis Function", "doi": null, "abstractUrl": "/proceedings-article/ihmsc/2009/3752a280/12OmNxWuice", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/1995/7312/0/73120098", "title": "Towards Positive-Breakdown Radial Basis Function Networks", "doi": null, "abstractUrl": "/proceedings-article/ictai/1995/73120098/12OmNxw5BoL", "parentPublication": { "id": "proceedings/ictai/1995/7312/0", "title": "Proceedings of 7th IEEE International Conference on Tools with Artificial Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcai/2009/3615/0/3615a444", "title": "Vehicle Type Recognition Based on Radial Basis Function Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/jcai/2009/3615a444/12OmNxxdZyz", "parentPublication": { "id": "proceedings/jcai/2009/3615/0", "title": "2009 International Joint Conference on Artificial Intelligence (JCAI)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kam/2009/3888/2/3888b057", "title": "Research of Data Mining Approach Based on Radial Basis Function Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/kam/2009/3888b057/12OmNyQYtvI", "parentPublication": { "id": "proceedings/kam/2009/3888/2", "title": "Knowledge Acquisition and Modeling, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/02/06908014", "title": "Exploring Brushlet Based 3D Textures in Transfer Function Specification for Direct Volume Rendering of Abdominal Organs", "doi": null, "abstractUrl": "/journal/tg/2015/02/06908014/13rRUwjoNx6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030383", "articleId": "13rRUIJcWlg", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2009030410", "articleId": "13rRUwj7cp6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwj7cp6", "doi": "10.1109/TVCG.2008.197", "abstract": "The application of information visualization holds tremendous promise for the electric power industry, but its potential has so far not been sufficiently exploited by the visualization community. Prior work on visualizing electric power systems has been limited to depicting raw or processed infor", "abstracts": [ { "abstractType": "Regular", "content": "The application of information visualization holds tremendous promise for the electric power industry, but its potential has so far not been sufficiently exploited by the visualization community. Prior work on visualizing electric power systems has been limited to depicting raw or processed infor", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The application of information visualization holds tremendous promise for the electric power industry, but its potential has so far not been sufficiently exploited by the visualization community. 
Prior work on visualizing electric power systems has been limited to depicting raw or processed infor", "title": "A Novel Visualization Technique for Electric Power Grid Analytics", "normalizedTitle": "A Novel Visualization Technique for Electric Power Grid Analytics", "fno": "ttg2009030410", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Applications", "Information Visualization", "Visualization Systems And Software", "Visualization Techniques And Methodologies" ], "authors": [ { "givenName": "Pak Chung", "surname": "Wong", "fullName": "Pak Chung Wong", "affiliation": "Pacific Northwest National Laboratory, Richland", "__typename": "ArticleAuthorType" }, { "givenName": "Kevin", "surname": "Schneider", "fullName": "Kevin Schneider", "affiliation": "Pacific Northwest National Laboratory, Richland", "__typename": "ArticleAuthorType" }, { "givenName": "Patrick", "surname": "Mackey", "fullName": "Patrick Mackey", "affiliation": "Pacific Northwest National Laboratory, Richland", "__typename": "ArticleAuthorType" }, { "givenName": "Harlan", "surname": "Foote", "fullName": "Harlan Foote", "affiliation": "Pacific Northwest National Laboratory, Richland", "__typename": "ArticleAuthorType" }, { "givenName": "George", "surname": "Chin Jr.", "fullName": "George Chin Jr.", "affiliation": "Pacific Northwest National Laboratory, Richland", "__typename": "ArticleAuthorType" }, { "givenName": "Ross", "surname": "Guttromson", "fullName": "Ross Guttromson", "affiliation": "Pacific Northwest National Laboratory, Richland", "__typename": "ArticleAuthorType" }, { "givenName": "Jim", "surname": "Thomas", "fullName": "Jim Thomas", "affiliation": "Pacific Northwest National Laboratory, Richland", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "410-423", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, 
"notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/visual/1994/6627/0/00346292", "title": "Visualization of an electric power transmission system", "doi": null, "abstractUrl": "/proceedings-article/visual/1994/00346292/12OmNBPtJDU", "parentPublication": { "id": "proceedings/visual/1994/6627/0", "title": "Proceedings Visualization '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2000/0804/0/08040131", "title": "New Methods for the Visualization of Electric Power System Information", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2000/08040131/12OmNBpVQ0b", "parentPublication": { "id": "proceedings/ieee-infovis/2000/0804/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/03/ttg2009030424", "title": "Visualization of Simulated Urban Spaces: Inferring Parameterized Generation of Streets, Parcels, and Aerial Imagery", "doi": null, "abstractUrl": "/journal/tg/2009/03/ttg2009030424/13rRUNvgzix", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/02/ttg2009020234", "title": "GeoBuilder: A Geometric Algorithm Visualization and Debugging System for 2D and 3D Geometric Computing", "doi": null, "abstractUrl": "/journal/tg/2009/02/ttg2009020234/13rRUwI5TXv", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/05/ttg2012050797", "title": "A Space-Filling Visualization Technique for Multivariate Small-World Graphs", "doi": null, "abstractUrl": "/journal/tg/2012/05/ttg2012050797/13rRUwInvl0", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/01/ttg2009010106", "title": "Asymmetric Tensor Analysis for Flow Visualization", "doi": null, "abstractUrl": "/journal/tg/2009/01/ttg2009010106/13rRUwjoNwW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/05/ttg2009050759", "title": "A Survey of Radial Methods for Information Visualization", "doi": null, "abstractUrl": "/journal/tg/2009/05/ttg2009050759/13rRUx0xPZv", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2008/02/tts2008020260", "title": "Software Architecture Visualization: An Evaluation Framework and Its Application", "doi": null, "abstractUrl": "/journal/ts/2008/02/tts2008020260/13rRUxAAT9m", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/05/v0497", "title": "Advanced Virtual Endoscopic Pituitary Surgery", "doi": null, "abstractUrl": "/journal/tg/2005/05/v0497/13rRUxNmPDJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/06/v1399", "title": "Graph Signatures for Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2006/06/v1399/13rRUxYrbUv", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { 
"previous": { "fno": "ttg2009030395", "articleId": "13rRUwIF6dI", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2009030424", "articleId": "13rRUNvgzix", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvgzix", "doi": "10.1109/TVCG.2008.193", "abstract": "Urban simulation models and their visualization are used to help regional planning agencies evaluate alternative transportation investments, land use regulations, and environmental protection policies. Typical urban simulations provide spatially distributed data about number of inhabitants, land prices, traffic, and other variables. In this article, we build on a synergy of urban simulation, urban visualization, and computer graphics to automatically infer an urban layout for any time step of the simulation sequence. In addition to standard visualization tools, our method gathers data of the original street network, parcels, and aerial imagery and uses the available simulation results to infer changes to the original urban layout and produce a new and plausible layout for the simulation results. In contrast with previous work, our approach automatically updates the layout based on changes in the simulation data and thus can scale to a large simulation over many years. The method in this article offers a substantial step forward in building integrated visualization and behavioral simulation systems for use in community visioning, planning, and policy analysis. We demonstrate our method on several real cases using a 200 GB database for a 16,300 km2 area surrounding Seattle, Washington.", "abstracts": [ { "abstractType": "Regular", "content": "Urban simulation models and their visualization are used to help regional planning agencies evaluate alternative transportation investments, land use regulations, and environmental protection policies. 
Typical urban simulations provide spatially distributed data about number of inhabitants, land prices, traffic, and other variables. In this article, we build on a synergy of urban simulation, urban visualization, and computer graphics to automatically infer an urban layout for any time step of the simulation sequence. In addition to standard visualization tools, our method gathers data of the original street network, parcels, and aerial imagery and uses the available simulation results to infer changes to the original urban layout and produce a new and plausible layout for the simulation results. In contrast with previous work, our approach automatically updates the layout based on changes in the simulation data and thus can scale to a large simulation over many years. The method in this article offers a substantial step forward in building integrated visualization and behavioral simulation systems for use in community visioning, planning, and policy analysis. We demonstrate our method on several real cases using a 200 GB database for a 16,300 km2 area surrounding Seattle, Washington.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Urban simulation models and their visualization are used to help regional planning agencies evaluate alternative transportation investments, land use regulations, and environmental protection policies. Typical urban simulations provide spatially distributed data about number of inhabitants, land prices, traffic, and other variables. In this article, we build on a synergy of urban simulation, urban visualization, and computer graphics to automatically infer an urban layout for any time step of the simulation sequence. In addition to standard visualization tools, our method gathers data of the original street network, parcels, and aerial imagery and uses the available simulation results to infer changes to the original urban layout and produce a new and plausible layout for the simulation results. 
In contrast with previous work, our approach automatically updates the layout based on changes in the simulation data and thus can scale to a large simulation over many years. The method in this article offers a substantial step forward in building integrated visualization and behavioral simulation systems for use in community visioning, planning, and policy analysis. We demonstrate our method on several real cases using a 200 GB database for a 16,300 km2 area surrounding Seattle, Washington.", "title": "Visualization of Simulated Urban Spaces: Inferring Parameterized Generation of Streets, Parcels, and Aerial Imagery", "normalizedTitle": "Visualization of Simulated Urban Spaces: Inferring Parameterized Generation of Streets, Parcels, and Aerial Imagery", "fno": "ttg2009030424", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Picture Image Generation", "Information Visualization", "Visualization Techniques And Methodologies" ], "authors": [ { "givenName": "Carlos A.", "surname": "Vanegas", "fullName": "Carlos A. Vanegas", "affiliation": "Purdue University, West Lafayette", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel G.", "surname": "Aliaga", "fullName": "Daniel G. 
Aliaga", "affiliation": "Purdue University, West Lafayette", "__typename": "ArticleAuthorType" }, { "givenName": "Bedřich", "surname": "Beneš", "fullName": "Bedřich Beneš", "affiliation": "Purdue University, West Lafayette", "__typename": "ArticleAuthorType" }, { "givenName": "Paul", "surname": "Waddell", "fullName": "Paul Waddell", "affiliation": "University of Washington, Seattle", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "424-435", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2004/2128/2/212820339", "title": "Building Detection by Dempster-Shafer Fusion of LIDAR Data and Multispectral Aerial Imagery", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212820339/12OmNzdoMEK", "parentPublication": { "id": "proceedings/icpr/2004/2128/2", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/02/ttg2009020234", "title": "GeoBuilder: A Geometric Algorithm Visualization and Debugging System for 2D and 3D Geometric Computing", "doi": null, "abstractUrl": "/journal/tg/2009/02/ttg2009020234/13rRUwI5TXv", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/03/ttg2009030410", "title": "A Novel Visualization Technique for Electric Power Grid Analytics", "doi": null, "abstractUrl": "/journal/tg/2009/03/ttg2009030410/13rRUwj7cp6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "trans/tg/2009/01/ttg2009010106", "title": "Asymmetric Tensor Analysis for Flow Visualization", "doi": null, "abstractUrl": "/journal/tg/2009/01/ttg2009010106/13rRUwjoNwW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/05/ttg2009050759", "title": "A Survey of Radial Methods for Information Visualization", "doi": null, "abstractUrl": "/journal/tg/2009/05/ttg2009050759/13rRUx0xPZv", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2008/02/tts2008020260", "title": "Software Architecture Visualization: An Evaluation Framework and Its Application", "doi": null, "abstractUrl": "/journal/ts/2008/02/tts2008020260/13rRUxAAT9m", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/01/ttg2012010160", "title": "The Topological Effects of Smoothing", "doi": null, "abstractUrl": "/journal/tg/2012/01/ttg2012010160/13rRUxASuGg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/05/v0497", "title": "Advanced Virtual Endoscopic Pituitary Surgery", "doi": null, "abstractUrl": "/journal/tg/2005/05/v0497/13rRUxNmPDJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2022/05/09910479", "title": "UTM City—Visualization of Unmanned Aerial Vehicles", "doi": null, "abstractUrl": 
"/magazine/cg/2022/05/09910479/1HcjABEJP56", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09006384", "title": "Streetify: Using Street View Imagery And Deep Learning For Urban Streets Development", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09006384/1hJrPIgs0cU", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030410", "articleId": "13rRUwj7cp6", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2009030436", "articleId": "13rRUxAASVT", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRFc", "name": "ttg2009030424s.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2009030424s.zip", "extension": "zip", "size": "81 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxAASVT", "doi": "10.1109/TVCG.2008.104", "abstract": "Continuing improvements in CPU and GPU performances as well as increasing multi-core processor and cluster-based parallelism demand for flexible and scalable parallel rendering solutions that can exploit multipipe hardware accelerated graphics. In fact, to achieve interactive visualization, scalable rendering systems are essential to cope with the rapid growth of data sets. However, parallel rendering systems are non-trivial to develop and often only application specific implementations have been proposed. The task of developing a scalable parallel rendering framework is even more difficult if it should be generic to support various types of data and visualization applications, and at the same time work efficiently on a cluster with distributed graphics cards. In this paper we introduce a novel system called Equalizer, a toolkit for scalable parallel rendering based on OpenGL which provides an application programming interface (API) to develop scalable graphics applications for a wide range of systems ranging from large distributed visualization clusters and multi-processor multipipe graphics systems to single-processor single-pipe desktop machines. 
We describe the system architecture, the basic API, discuss its advantadges over previous approaches, present example configurations and usage scenarios as well as scalability results.", "abstracts": [ { "abstractType": "Regular", "content": "Continuing improvements in CPU and GPU performances as well as increasing multi-core processor and cluster-based parallelism demand for flexible and scalable parallel rendering solutions that can exploit multipipe hardware accelerated graphics. In fact, to achieve interactive visualization, scalable rendering systems are essential to cope with the rapid growth of data sets. However, parallel rendering systems are non-trivial to develop and often only application specific implementations have been proposed. The task of developing a scalable parallel rendering framework is even more difficult if it should be generic to support various types of data and visualization applications, and at the same time work efficiently on a cluster with distributed graphics cards. In this paper we introduce a novel system called Equalizer, a toolkit for scalable parallel rendering based on OpenGL which provides an application programming interface (API) to develop scalable graphics applications for a wide range of systems ranging from large distributed visualization clusters and multi-processor multipipe graphics systems to single-processor single-pipe desktop machines. We describe the system architecture, the basic API, discuss its advantadges over previous approaches, present example configurations and usage scenarios as well as scalability results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Continuing improvements in CPU and GPU performances as well as increasing multi-core processor and cluster-based parallelism demand for flexible and scalable parallel rendering solutions that can exploit multipipe hardware accelerated graphics. 
In fact, to achieve interactive visualization, scalable rendering systems are essential to cope with the rapid growth of data sets. However, parallel rendering systems are non-trivial to develop and often only application specific implementations have been proposed. The task of developing a scalable parallel rendering framework is even more difficult if it should be generic to support various types of data and visualization applications, and at the same time work efficiently on a cluster with distributed graphics cards. In this paper we introduce a novel system called Equalizer, a toolkit for scalable parallel rendering based on OpenGL which provides an application programming interface (API) to develop scalable graphics applications for a wide range of systems ranging from large distributed visualization clusters and multi-processor multipipe graphics systems to single-processor single-pipe desktop machines. We describe the system architecture, the basic API, discuss its advantadges over previous approaches, present example configurations and usage scenarios as well as scalability results.", "title": "Equalizer: A Scalable Parallel Rendering Framework", "normalizedTitle": "Equalizer: A Scalable Parallel Rendering Framework", "fno": "ttg2009030436", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Parallel Rendering", "Scalable Visualization", "Cluster Graphics", "Display Wall" ], "authors": [ { "givenName": "Stefan", "surname": "Eilemann", "fullName": "Stefan Eilemann", "affiliation": "Eyescale Software, Neuchatel and University of Zurich, Zurich", "__typename": "ArticleAuthorType" }, { "givenName": "Maxim", "surname": "Makhinya", "fullName": "Maxim Makhinya", "affiliation": "University of Zurich, Zurich", "__typename": "ArticleAuthorType" }, { "givenName": "Renato", "surname": "Pajarola", "fullName": "Renato Pajarola", "affiliation": "University of Zurich, Zurich", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, 
"showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "436-452", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/sc/2000/9802/0/98020030", "title": "Distributed Rendering for Scalable Displays", "doi": null, "abstractUrl": "/proceedings-article/sc/2000/98020030/12OmNApcuer", "parentPublication": { "id": "proceedings/sc/2000/9802/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660016", "title": "OpenGL Multipipe SDK: A Toolkit for Scalable Parallel Rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660016/12OmNBUS755", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660017", "title": "A Shader-Based Parallel Rendering Framework", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660017/12OmNvTk02A", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pvg/2001/7223/0/72230085", "title": "Sort-Last Parallel Rendering for Viewing Extremely Large Data Sets on Tile Displays", "doi": null, "abstractUrl": "/proceedings-article/pvg/2001/72230085/12OmNvq5jzS", "parentPublication": { "id": "proceedings/pvg/2001/7223/0", "title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vv/2004/8781/0/87810017", "title": "Hybrid Hardware-Accelerated Image Composition for Sort-Last Parallel 
Rendering on Graphics Clusters with Commodity Image Compositor", "doi": null, "abstractUrl": "/proceedings-article/vv/2004/87810017/12OmNvsm6vG", "parentPublication": { "id": "proceedings/vv/2004/8781/0", "title": "Volume Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532786", "title": "OpenGL multipipe SDK: a toolkit for scalable parallel rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532786/12OmNwe2Im1", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dodugc/2005/2496/0/24960388", "title": "Performance Comparisons of Visualization Architectures", "doi": null, "abstractUrl": "/proceedings-article/dodugc/2005/24960388/12OmNxX3urW", "parentPublication": { "id": "proceedings/dodugc/2005/2496/0", "title": "2005 Users Group Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2011/4602/0/4602a172", "title": "Scalable Multi-GPU Decoupled Parallel Rendering Approach in Shared Memory Architecture", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2011/4602a172/12OmNy9PrkP", "parentPublication": { "id": "proceedings/icvrv/2011/4602/0", "title": "2011 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/02/08466895", "title": "Equalizer 2.0–Convergence of a Parallel Rendering Framework", "doi": null, "abstractUrl": "/journal/tg/2020/02/08466895/13ElNJSxmG4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2001/03/l0241", 
"title": "Adaptive Parallel Rendering on Multiprocessors and Workstation Clusters", "doi": null, "abstractUrl": "/journal/td/2001/03/l0241/13rRUwInvsn", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030424", "articleId": "13rRUNvgzix", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2009030453", "articleId": "13rRUwIF69d", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesQb", "name": "ttg2009030436s.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2009030436s.zip", "extension": "zip", "size": "18.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwIF69d", "doi": "10.1109/TVCG.2008.106", "abstract": "This article presents a real-time GPU-based post-filtering method for rendering acceptable depth-of-field effects suited for virtual reality. Blurring is achieved by nonlinearly interpolating mipmap images generated from a pinhole image. Major artifacts common in the post-filtering techniques such as bilinear magnification artifact, intensity leakage, and blurring discontinuity are practically eliminated via magnification with a circular filter, anisotropic mipmapping, and smoothing of blurring degrees. The whole framework is accelerated using GPU programs for constant and scalable real-time performance required for virtual reality. We also compare our method to recent GPU-based methods in terms of image quality and rendering performance.", "abstracts": [ { "abstractType": "Regular", "content": "This article presents a real-time GPU-based post-filtering method for rendering acceptable depth-of-field effects suited for virtual reality. Blurring is achieved by nonlinearly interpolating mipmap images generated from a pinhole image. Major artifacts common in the post-filtering techniques such as bilinear magnification artifact, intensity leakage, and blurring discontinuity are practically eliminated via magnification with a circular filter, anisotropic mipmapping, and smoothing of blurring degrees. The whole framework is accelerated using GPU programs for constant and scalable real-time performance required for virtual reality. 
We also compare our method to recent GPU-based methods in terms of image quality and rendering performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This article presents a real-time GPU-based post-filtering method for rendering acceptable depth-of-field effects suited for virtual reality. Blurring is achieved by nonlinearly interpolating mipmap images generated from a pinhole image. Major artifacts common in the post-filtering techniques such as bilinear magnification artifact, intensity leakage, and blurring discontinuity are practically eliminated via magnification with a circular filter, anisotropic mipmapping, and smoothing of blurring degrees. The whole framework is accelerated using GPU programs for constant and scalable real-time performance required for virtual reality. We also compare our method to recent GPU-based methods in terms of image quality and rendering performance.", "title": "Real-Time Depth-of-Field Rendering Using Anisotropically Filtered Mipmap Interpolation", "normalizedTitle": "Real-Time Depth-of-Field Rendering Using Anisotropically Filtered Mipmap Interpolation", "fno": "ttg2009030453", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Picture Image Generation", "Three Dimensional Graphics And Realism" ], "authors": [ { "givenName": "Sungkil", "surname": "Lee", "fullName": "Sungkil Lee", "affiliation": "Pohang University of Science and Technology (POSTECH), Pohang", "__typename": "ArticleAuthorType" }, { "givenName": "Gerard Jounghyun", "surname": "Kim", "fullName": "Gerard Jounghyun Kim", "affiliation": "Korea University, Seoul", "__typename": "ArticleAuthorType" }, { "givenName": "Seungmoon", "surname": "Choi", "fullName": "Seungmoon Choi", "affiliation": "Pohang University of Science and Technology (POSTECH), Pohang", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", 
"pubType": "trans", "pages": "453-464", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cyberc/2015/9200/0/9200a324", "title": "Real-Time Depth-Image-Based Rendering on GPU", "doi": null, "abstractUrl": "/proceedings-article/cyberc/2015/9200a324/12OmNBCqbGM", "parentPublication": { "id": "proceedings/cyberc/2015/9200/0", "title": "2015 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2001/7200/0/7200dong", "title": "Volume Rendering of Fine Details Within Medical Data", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2001/7200dong/12OmNx6xHlc", "parentPublication": { "id": "proceedings/ieee-vis/2001/7200/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mmit/2010/4008/1/4008a183", "title": "An Improved Digital Image Interpolation Algorithm", "doi": null, "abstractUrl": "/proceedings-article/mmit/2010/4008a183/12OmNyL0TtW", "parentPublication": { "id": "proceedings/mmit/2010/4008/1", "title": "MultiMedia and Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532818", "title": "The magic volume lens: an interactive focus+context technique for volume rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532818/12OmNyuyade", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040741", "title": "Animation of Orthogonal Texture Patterns for Vector Field Visualization", "doi": null, 
"abstractUrl": "/journal/tg/2008/04/ttg2008040741/13rRUNvgyWh", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030640", "title": "Silhouette Smoothing for Real-Time Rendering of Mesh Surfaces", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030640/13rRUwI5TQP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040820", "title": "Output-Sensitive 3D Line Integral Convolution", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040820/13rRUwghd94", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040914", "title": "Interactive Rendering of Dynamic Geometry", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040914/13rRUxBJhmM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/02/ttg2009020221", "title": "Visualization and Computer Graphics on Isotropically Emissive Volumetric Displays", "doi": null, "abstractUrl": "/journal/tg/2009/02/ttg2009020221/13rRUxYIMUT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/08/08624466", "title": "Depth of Field Rendering Using Multilayer-Neighborhood Optimization", "doi": null, "abstractUrl": "/journal/tg/2020/08/08624466/17D45WaTkm5", "parentPublication": { "id": "trans/tg", "title": "IEEE 
Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030436", "articleId": "13rRUxAASVT", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2009030465", "articleId": "13rRUyYjKa9", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgBz", "name": "ttg2009030453s.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2009030453s.zip", "extension": "zip", "size": "639 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyYjKa9", "doi": "10.1109/TVCG.2008.97", "abstract": "Modeling real-world scenes, beyond diffuse objects, plays an important role in computer graphics, virtual reality, and other commercial applications. One active approach is projecting binary patterns in order to obtain correspondence and reconstruct a densely sampled 3D model. In such structured-light systems, determining whether a pixel is directly illuminated by the projector is essential to decoding the patterns. When a scene has abundant indirect light, this process is especially difficult. In this paper, we present a robust pixel classification algorithm for this purpose. Our method correctly establishes the lower and upper bounds of the possible intensity values of an illuminated pixel and of a non-illuminated pixel. Based on the two intervals, our method classifies a pixel by determining whether its intensity is within one interval but not in the other. Our method performs better than standard method due to the fact that it avoids gross errors during decoding process caused by strong inter-reflections. For the remaining uncertain pixels, we apply an iterative algorithm to reduce the inter-reflection within the scene. Thus, more points can be decoded and reconstructed after each iteration. Moreover, the iterative algorithm is carried out in an adaptive fashion for fast convergence.", "abstracts": [ { "abstractType": "Regular", "content": "Modeling real-world scenes, beyond diffuse objects, plays an important role in computer graphics, virtual reality, and other commercial applications. 
One active approach is projecting binary patterns in order to obtain correspondence and reconstruct a densely sampled 3D model. In such structured-light systems, determining whether a pixel is directly illuminated by the projector is essential to decoding the patterns. When a scene has abundant indirect light, this process is especially difficult. In this paper, we present a robust pixel classification algorithm for this purpose. Our method correctly establishes the lower and upper bounds of the possible intensity values of an illuminated pixel and of a non-illuminated pixel. Based on the two intervals, our method classifies a pixel by determining whether its intensity is within one interval but not in the other. Our method performs better than standard method due to the fact that it avoids gross errors during decoding process caused by strong inter-reflections. For the remaining uncertain pixels, we apply an iterative algorithm to reduce the inter-reflection within the scene. Thus, more points can be decoded and reconstructed after each iteration. Moreover, the iterative algorithm is carried out in an adaptive fashion for fast convergence.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Modeling real-world scenes, beyond diffuse objects, plays an important role in computer graphics, virtual reality, and other commercial applications. One active approach is projecting binary patterns in order to obtain correspondence and reconstruct a densely sampled 3D model. In such structured-light systems, determining whether a pixel is directly illuminated by the projector is essential to decoding the patterns. When a scene has abundant indirect light, this process is especially difficult. In this paper, we present a robust pixel classification algorithm for this purpose. Our method correctly establishes the lower and upper bounds of the possible intensity values of an illuminated pixel and of a non-illuminated pixel. 
Based on the two intervals, our method classifies a pixel by determining whether its intensity is within one interval but not in the other. Our method performs better than standard method due to the fact that it avoids gross errors during decoding process caused by strong inter-reflections. For the remaining uncertain pixels, we apply an iterative algorithm to reduce the inter-reflection within the scene. Thus, more points can be decoded and reconstructed after each iteration. Moreover, the iterative algorithm is carried out in an adaptive fashion for fast convergence.", "title": "An Adaptive Correspondence Algorithm for Modeling Scenes with Strong Interreflections", "normalizedTitle": "An Adaptive Correspondence Algorithm for Modeling Scenes with Strong Interreflections", "fno": "ttg2009030465", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Graphics", "Three Dimensional Graphics And Realism", "Digitization And Image Capture", "Imaging Geometry" ], "authors": [ { "givenName": "Yi", "surname": "Xu", "fullName": "Yi Xu", "affiliation": "Purdue University, West Lafayette", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel G.", "surname": "Aliaga", "fullName": "Daniel G. 
Aliaga", "affiliation": "Purdue University, West Lafayette", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "465-480", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tp/2010/06/ttp2010061060", "title": "Shape and Spatially-Varying BRDFs from Photometric Stereo", "doi": null, "abstractUrl": "/journal/tp/2010/06/ttp2010061060/13rRUEgs2uq", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/03/ttg2009030453", "title": "Real-Time Depth-of-Field Rendering Using Anisotropically Filtered Mipmap Interpolation", "doi": null, "abstractUrl": "/journal/tg/2009/03/ttg2009030453/13rRUwIF69d", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/04/ttg2010040676", "title": "Modeling Repetitive Motions Using Structured Light", "doi": null, "abstractUrl": "/journal/tg/2010/04/ttg2010040676/13rRUwcS1CR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/04/ttg2011040527", "title": "Adaptive Motion Data Representation with Repeated Motion Analysis", "doi": null, "abstractUrl": "/journal/tg/2011/04/ttg2011040527/13rRUxBJhvr", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2011/08/ttg2011081108", "title": "Representativity for Robust and Adaptive Multiple Importance Sampling", "doi": null, "abstractUrl": "/journal/tg/2011/08/ttg2011081108/13rRUxOdD8i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/01/ttg2009010034", "title": "Interactive Navigation of Heterogeneous Agents Using Adaptive Roadmaps", "doi": null, "abstractUrl": "/journal/tg/2009/01/ttg2009010034/13rRUxcbnH5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030487", "title": "Advances in the Dynallax Solid-State Dynamic Parallax Barrier Autostereoscopic Visualization Display System", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030487/13rRUy2YLYn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/04/v0658", "title": "Multifocal Projection: A Multiprojector Technique for Increasing Focal Depth", "doi": null, "abstractUrl": "/journal/tg/2006/04/v0658/13rRUypp57x", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a194", "title": "Frequency Shift Triangulation: A Robust Fringe Projection Technique for 3D Shape Acquisition in the Presence of Strong Interreflections", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a194/1ezRBg9CfUk", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/10/09380921", "title": "Adaptive Irradiance Sampling for Many-Light Rendering of Subsurface Scattering", "doi": null, "abstractUrl": "/journal/tg/2022/10/09380921/1s2GgiOWWt2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030453", "articleId": "13rRUwIF69d", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2009030481", "articleId": "13rRUxAAT7y", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesV1", "name": "ttg2009030465s.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2009030465s.zip", "extension": "zip", "size": "43 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxAAT7y", "doi": "10.1109/TVCG.2008.199", "abstract": "Human motion indexing and retrieval are important for animators due to the need to search for motions in the database which can be blended and concatenated. Most of the previous researches of human motion indexing and retrieval compute the Euclidean distance of joint angles or joint positions. Such approaches are difficult to apply for cases in which multiple characters are closely interacting with each other, as the relationships of the characters are not encoded in the representation. In this research, we propose a topology-based approach to index the motions of two human characters in close contact. We compute and encode how the two bodies are tangled based on the concept of rational tangles. The encoded relationships, which we define as {\\it TangleList}, are used to determine the similarity of the pairs of postures. Using our method, we can index and retrieve motions such as one person piggy-backing another, one person assisting another in walking, and two persons dancing / wrestling. Our method is useful to manage a motion database of multiple characters. We can also produce motion graph structures of two characters closely interacting with each other by interpolating and concatenating topologically similar postures and motion clips, which are applicable to 3D computer games and computer animation.", "abstracts": [ { "abstractType": "Regular", "content": "Human motion indexing and retrieval are important for animators due to the need to search for motions in the database which can be blended and concatenated. 
Most of the previous researches of human motion indexing and retrieval compute the Euclidean distance of joint angles or joint positions. Such approaches are difficult to apply for cases in which multiple characters are closely interacting with each other, as the relationships of the characters are not encoded in the representation. In this research, we propose a topology-based approach to index the motions of two human characters in close contact. We compute and encode how the two bodies are tangled based on the concept of rational tangles. The encoded relationships, which we define as {\\it TangleList}, are used to determine the similarity of the pairs of postures. Using our method, we can index and retrieve motions such as one person piggy-backing another, one person assisting another in walking, and two persons dancing / wrestling. Our method is useful to manage a motion database of multiple characters. We can also produce motion graph structures of two characters closely interacting with each other by interpolating and concatenating topologically similar postures and motion clips, which are applicable to 3D computer games and computer animation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Human motion indexing and retrieval are important for animators due to the need to search for motions in the database which can be blended and concatenated. Most of the previous researches of human motion indexing and retrieval compute the Euclidean distance of joint angles or joint positions. Such approaches are difficult to apply for cases in which multiple characters are closely interacting with each other, as the relationships of the characters are not encoded in the representation. In this research, we propose a topology-based approach to index the motions of two human characters in close contact. We compute and encode how the two bodies are tangled based on the concept of rational tangles. 
The encoded relationships, which we define as {\\it TangleList}, are used to determine the similarity of the pairs of postures. Using our method, we can index and retrieve motions such as one person piggy-backing another, one person assisting another in walking, and two persons dancing / wrestling. Our method is useful to manage a motion database of multiple characters. We can also produce motion graph structures of two characters closely interacting with each other by interpolating and concatenating topologically similar postures and motion clips, which are applicable to 3D computer games and computer animation.", "title": "Indexing and Retrieving Motions of Characters in Close Contact", "normalizedTitle": "Indexing and Retrieving Motions of Characters in Close Contact", "fno": "ttg2009030481", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Animation", "Face And Gesture Recognition" ], "authors": [ { "givenName": "Edmond S.L.", "surname": "Ho", "fullName": "Edmond S.L. Ho", "affiliation": "University of Edinburgh, Edinburgh", "__typename": "ArticleAuthorType" }, { "givenName": "Taku", "surname": "Komura", "fullName": "Taku Komura", "affiliation": "University of Edinburgh, Edinburgh", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "481-492", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/acii/2015/9953/0/07344583", "title": "Decoupling facial expressions and head motions in complex emotions", "doi": null, "abstractUrl": "/proceedings-article/acii/2015/07344583/12OmNB9t6qd", "parentPublication": { "id": "proceedings/acii/2015/9953/0", "title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/pg/2003/2028/0/20280194", "title": "Physical Touch-Up of Human Motions", "doi": null, "abstractUrl": "/proceedings-article/pg/2003/20280194/12OmNB9t6vH", "parentPublication": { "id": "proceedings/pg/2003/2028/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1997/7984/0/79840048", "title": "Capturing and analyzing stability of human body motions using video cameras", "doi": null, "abstractUrl": "/proceedings-article/ca/1997/79840048/12OmNCgrDbP", "parentPublication": { "id": "proceedings/ca/1997/7984/0", "title": "Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2017/5812/0/08056588", "title": "When facial expressions dominate emotion perception in groups of virtual characters", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2017/08056588/12OmNz61d7s", "parentPublication": { "id": "proceedings/vs-games/2017/5812/0", "title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/01/ttp2009010158", "title": "Human Motion Tracking by Registering an Articulated Surface to 3D Points and Normals", "doi": null, "abstractUrl": "/journal/tp/2009/01/ttp2009010158/13rRUILtJnc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2011/04/05934840", "title": "Direct Control of Simulated Nonhuman Characters", "doi": null, "abstractUrl": "/magazine/cg/2011/04/05934840/13rRUILtJtD", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "mags/cg/2011/04/mcg2011040056", "title": "Direct Control of Simulated Nonhuman Characters", "doi": null, "abstractUrl": "/magazine/cg/2011/04/mcg2011040056/13rRUwInv93", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/10/06781622", "title": "Diverse Motions and Character Shapes for Simulated Skills", "doi": null, "abstractUrl": "/journal/tg/2014/10/06781622/13rRUxBa5rY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797856", "title": "Real-time Animation and Motion Retargeting of Virtual Characters Based on Single RGB-D Camera", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797856/1cJ1evLlHRm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2019/2297/0/229700a089", "title": "A Tangible Interface using 3D Printed Figures for Searching for Combat Motions of Two Characters", "doi": null, "abstractUrl": "/proceedings-article/cw/2019/229700a089/1fHkmKWZlSw", "parentPublication": { "id": "proceedings/cw/2019/2297/0", "title": "2019 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030465", "articleId": "13rRUyYjKa9", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2009030493", "articleId": "13rRUxlgy3z", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgzT", "name": "ttg2009030481s.zip", 
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2009030481s.zip", "extension": "zip", "size": "24.4 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxlgy3z", "doi": "10.1109/TVCG.2008.107", "abstract": "We propose a novel boundary handling algorithm for particle-based fluids. Based on a predictor-corrector scheme for both velocity and position, one- and two-way coupling with rigid bodies can be realized. The proposed algorithm offers significant improvements over existing penalty-based approaches. Different slip conditions can be realized and non-penetration is enforced. Direct forcing is employed to meet the desired boundary conditions and to ensure valid states after each simulation step. We have performed various experiments in 2D and 3D. They illustrate one- and two-way coupling of rigid bodies and fluids, the effects of hydrostatic and dynamic forces on a rigid body as well as different slip conditions. Numerical experiments and performance measurements are provided.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a novel boundary handling algorithm for particle-based fluids. Based on a predictor-corrector scheme for both velocity and position, one- and two-way coupling with rigid bodies can be realized. The proposed algorithm offers significant improvements over existing penalty-based approaches. Different slip conditions can be realized and non-penetration is enforced. Direct forcing is employed to meet the desired boundary conditions and to ensure valid states after each simulation step. We have performed various experiments in 2D and 3D. They illustrate one- and two-way coupling of rigid bodies and fluids, the effects of hydrostatic and dynamic forces on a rigid body as well as different slip conditions. 
Numerical experiments and performance measurements are provided.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a novel boundary handling algorithm for particle-based fluids. Based on a predictor-corrector scheme for both velocity and position, one- and two-way coupling with rigid bodies can be realized. The proposed algorithm offers significant improvements over existing penalty-based approaches. Different slip conditions can be realized and non-penetration is enforced. Direct forcing is employed to meet the desired boundary conditions and to ensure valid states after each simulation step. We have performed various experiments in 2D and 3D. They illustrate one- and two-way coupling of rigid bodies and fluids, the effects of hydrostatic and dynamic forces on a rigid body as well as different slip conditions. Numerical experiments and performance measurements are provided.", "title": "Direct Forcing for Lagrangian Rigid-Fluid Coupling", "normalizedTitle": "Direct Forcing for Lagrangian Rigid-Fluid Coupling", "fno": "ttg2009030493", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Physically Based Modeling", "Animation" ], "authors": [ { "givenName": "Markus", "surname": "Becker", "fullName": "Markus Becker", "affiliation": "Albert-Ludwigs-University Freiburg, Freiburg", "__typename": "ArticleAuthorType" }, { "givenName": "Hendrik", "surname": "Tessendorf", "fullName": "Hendrik Tessendorf", "affiliation": "Albert-Ludwigs-University Freiburg, Freiburg", "__typename": "ArticleAuthorType" }, { "givenName": "Matthias", "surname": "Teschner", "fullName": "Matthias Teschner", "affiliation": "Albert-Ludwigs-University Freiburg, Freiburg", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "493-503", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": 
null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cgi/2004/2171/0/21710327", "title": "Mixing Deformable and Rigid-Body Mechanics Simulation", "doi": null, "abstractUrl": "/proceedings-article/cgi/2004/21710327/12OmNASILTV", "parentPublication": { "id": "proceedings/cgi/2004/2171/0", "title": "Proceedings. Computer Graphics International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2010/6984/0/05540002", "title": "Non-rigid structure from locally-rigid motion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05540002/12OmNCgrCXJ", "parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pbg/2005/20/0/01500327", "title": "A unified Lagrangian approach to solid-fluid animation", "doi": null, "abstractUrl": "/proceedings-article/pbg/2005/01500327/12OmNvD8RFL", "parentPublication": { "id": "proceedings/pbg/2005/20/0", "title": "Point-Based Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2016/2303/0/2303a235", "title": "Individual Time-Stepping for Rigid-Fluid Coupling of Particle Based Fluids", "doi": null, "abstractUrl": "/proceedings-article/cw/2016/2303a235/12OmNvT2oZL", "parentPublication": { "id": "proceedings/cw/2016/2303/0", "title": "2016 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2011/4602/0/4602a164", "title": "SPH-Based Fluid Simulation: A Survey", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2011/4602a164/12OmNyKa6dj", "parentPublication": { "id": "proceedings/icvrv/2011/4602/0", "title": "2011 International Conference on Virtual Reality and Visualization", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2011/04/mcg2011040056", "title": "Direct Control of Simulated Nonhuman Characters", "doi": null, "abstractUrl": "/magazine/cg/2011/04/mcg2011040056/13rRUwInv93", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/01/ttg2010010070", "title": "Fluid Simulation with Articulated Bodies", "doi": null, "abstractUrl": "/journal/tg/2010/01/ttg2010010070/13rRUxDqS8f", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040797", "title": "Two-Way Coupled SPH and Particle Level Set Fluid Simulation", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040797/13rRUxE04tu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1997/03/mcg1997030052", "title": "Real-Time Fluid Simulation in a Dynamic Virtual Environment", "doi": null, "abstractUrl": "/magazine/cg/1997/03/mcg1997030052/13rRUyXKxU3", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2004/02/v0189", "title": "A Fast Impulsive Contact Suite for Rigid Body Simulation", "doi": null, "abstractUrl": "/journal/tg/2004/02/v0189/13rRUygT7mK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030481", "articleId": "13rRUxAAT7y", "__typename": "AdjacentArticleType" 
}, "next": { "fno": "ttg2009030504", "articleId": "13rRUx0gezR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRV8", "name": "ttg2009030493s.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2009030493s.zip", "extension": "zip", "size": "43.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNAZx8On", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "15", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0gezR", "doi": "10.1109/TVCG.2008.103", "abstract": "Shape indexing, classification, and retrieval are fundamental problems in computer graphics. This work introduces a novel method for surface indexing and classification based on Teichmuller theory. The Teichmuller space for surfaces with the same topology is a finite dimensional manifold, where each point represents a conformal equivalence class, a curve represents a deformation process from one class to the other. We apply Teichmuller space coordinates as shape descriptors, which are succinct, discriminating and intrinsic; invariant under the rigid motions and scalings, insensitive to resolutions. Furthermore, the method has solid theoretic foundation, and the computation of Teichmuller coordinates is practical, stable and efficient. This work focuses on the surfaces with negative Euler numbers, which have a unique conformal Riemannian metric with -1 Gaussian curvature. The coordinates which we will compute are the lengths of a special set of geodesics under this special metric. The metric can be obtained by the curvature flow algorithm, the geodesics can be calculated using algebraic topological method. We tested our method extensively for indexing and comparison of about one hundred of surfaces with various topologies, geometries and resolutions. The experimental results show the efficacy and efficiency of the length coordinate of the Teichmuller space.", "abstracts": [ { "abstractType": "Regular", "content": "Shape indexing, classification, and retrieval are fundamental problems in computer graphics. 
This work introduces a novel method for surface indexing and classification based on Teichmuller theory. The Teichmuller space for surfaces with the same topology is a finite dimensional manifold, where each point represents a conformal equivalence class, a curve represents a deformation process from one class to the other. We apply Teichmuller space coordinates as shape descriptors, which are succinct, discriminating and intrinsic; invariant under the rigid motions and scalings, insensitive to resolutions. Furthermore, the method has solid theoretic foundation, and the computation of Teichmuller coordinates is practical, stable and efficient. This work focuses on the surfaces with negative Euler numbers, which have a unique conformal Riemannian metric with -1 Gaussian curvature. The coordinates which we will compute are the lengths of a special set of geodesics under this special metric. The metric can be obtained by the curvature flow algorithm, the geodesics can be calculated using algebraic topological method. We tested our method extensively for indexing and comparison of about one hundred of surfaces with various topologies, geometries and resolutions. The experimental results show the efficacy and efficiency of the length coordinate of the Teichmuller space.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Shape indexing, classification, and retrieval are fundamental problems in computer graphics. This work introduces a novel method for surface indexing and classification based on Teichmuller theory. The Teichmuller space for surfaces with the same topology is a finite dimensional manifold, where each point represents a conformal equivalence class, a curve represents a deformation process from one class to the other. We apply Teichmuller space coordinates as shape descriptors, which are succinct, discriminating and intrinsic; invariant under the rigid motions and scalings, insensitive to resolutions. 
Furthermore, the method has solid theoretic foundation, and the computation of Teichmuller coordinates is practical, stable and efficient. This work focuses on the surfaces with negative Euler numbers, which have a unique conformal Riemannian metric with -1 Gaussian curvature. The coordinates which we will compute are the lengths of a special set of geodesics under this special metric. The metric can be obtained by the curvature flow algorithm, the geodesics can be calculated using algebraic topological method. We tested our method extensively for indexing and comparison of about one hundred of surfaces with various topologies, geometries and resolutions. The experimental results show the efficacy and efficiency of the length coordinate of the Teichmuller space.", "title": "Computing Teichmüller Shape Space", "normalizedTitle": "Computing Teichmüller Shape Space", "fno": "ttg2009030504", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Curve", "Surface", "Solid", "And Object Representations", "Geometric Algorithms", "Languages", "And Systems" ], "authors": [ { "givenName": "Miao", "surname": "Jin", "fullName": "Miao Jin", "affiliation": "University of Louisiana at Lafayette, Lafayette", "__typename": "ArticleAuthorType" }, { "givenName": "Wei", "surname": "Zeng", "fullName": "Wei Zeng", "affiliation": "Stony Brook University, Stony Brook", "__typename": "ArticleAuthorType" }, { "givenName": "Feng", "surname": "Luo", "fullName": "Feng Luo", "affiliation": "Rutgers University, Piscataway", "__typename": "ArticleAuthorType" }, { "givenName": "Xianfeng", "surname": "Gu", "fullName": "Xianfeng Gu", "affiliation": "Stony Brook University, Stony Brook", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "trans", "pages": "504-517", "year": "2009", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2009/3992/0/05206578", "title": "Shape analysis with conformal invariants for multiply connected domains and its application to analyzing brain morphology", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206578/12OmNBQTJkt", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2001/7200/0/7200ying", "title": "Nonmanifold Subdivision", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2001/7200ying/12OmNBuL14k", "parentPublication": { "id": "proceedings/ieee-vis/2001/7200/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880267", "title": "Optimal Global Conformal Surface Parameterization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880267/12OmNsbGvEw", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2011/0063/0/06130442", "title": "3D dynamics analysis in Teichmüller space", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130442/12OmNzA6GQY", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459416", "title": "Studying brain morphometry using conformal equivalence class", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459416/12OmNzBOhQF", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 
12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2015/10/07010934", "title": "Shape-from-Template", "doi": null, "abstractUrl": "/journal/tp/2015/10/07010934/13rRUxAAT2u", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2002/04/v0346", "title": "Robust Creation of Implicit Surfaces from Polygonal Meshes", "doi": null, "abstractUrl": "/journal/tg/2002/04/v0346/13rRUxD9h4Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/05/ttg2008051030", "title": "Discrete Surface Ricci Flow", "doi": null, "abstractUrl": "/journal/tg/2008/05/ttg2008051030/13rRUyfbwqB", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040805", "title": "Globally Optimal Surface Mapping for Surfaces with Arbitrary Topology", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040805/13rRUygT7su", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2010/04/ttp2010040662", "title": "Ricci Flow for 3D Shape Analysis", "doi": null, "abstractUrl": "/journal/tp/2010/04/ttp2010040662/13rRUyv53Gz", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2009030493", "articleId": 
"13rRUxlgy3z", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45We0UD7", "doi": "10.1109/TVCG.2019.2891030", "abstract": null, "abstracts": [], "normalizedAbstract": null, "title": "Farewell and New EIC Introduction", "normalizedTitle": "Farewell and New EIC Introduction", "fno": "08629339", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Leila", "surname": "De Floriani", "fullName": "Leila De Floriani", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1447-1448", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/td/2006/01/01549809", "title": "Editorial: EIC Farewell and New EIC Introduction", "doi": null, "abstractUrl": "/journal/td/2006/01/01549809/13rRUILLkuX", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/03/ttg2011030261", "title": "Editorial: EIC Farewell and New EIC Introduction", "doi": null, "abstractUrl": "/journal/tg/2011/03/ttg2011030261/13rRUILLkvk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/01/v0001", "title": "Editorial: EIC Farewell and New EIC Introduction", "doi": null, "abstractUrl": "/journal/tg/2007/01/v0001/13rRUIM2VGW", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2008/04/ttm2008040385", "title": "Editorial: EIC Farewell and New EIC Introduction", "doi": null, "abstractUrl": "/journal/tm/2008/04/ttm2008040385/13rRUwIF69R", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2009/12/ttd2009121713", "title": "Editorial: EIC Farewell and New EIC Introduction", "doi": null, "abstractUrl": "/journal/td/2009/12/ttd2009121713/13rRUx0gezw", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/lt/2013/01/tlt2013010001", "title": "EiC team farewell and new EiC team introduction", "doi": null, "abstractUrl": "/journal/lt/2013/01/tlt2013010001/13rRUxAATd5", "parentPublication": { "id": "trans/lt", "title": "IEEE Transactions on Learning Technologies", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/01/06966881", "title": "EIC Farewell and New EIC Introduction", "doi": null, "abstractUrl": "/journal/tg/2015/01/06966881/13rRUxBa565", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2013/12/ttd2013122322", "title": "Editor's Note: EIC Farewell and New EIC Introduction", "doi": null, "abstractUrl": "/journal/td/2013/12/ttd2013122322/13rRUxZ0o18", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/10003073", "title": "Farewell and 
New EIC Introduction", "doi": null, "abstractUrl": "/journal/tg/2023/02/10003073/1Jv6wZRSJq0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2021/06/09642424", "title": "EIC Farewell and New EIC Introduction", "doi": null, "abstractUrl": "/journal/tb/2021/06/09642424/1zarErdh7XO", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "08301570", "articleId": "17D45W9KVHk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45W9KVHk", "doi": "10.1109/TVCG.2018.2808972", "abstract": "We propose a novel framework for hair animation as well as hair-water interaction that supports millions of hairs. First, we develop a hair animation framework that embeds hair into a tetrahedralized volume mesh that we kinematically skin to deform and follow the exterior of an animated character. Allowing the hairs to follow their precomputed embedded locations in the kinematically deforming skinned mesh already provides visually plausible behavior. Creating a copy of the tetrahedral mesh, endowing it with springs, and attaching it to the kinematically skinned mesh creates more dynamic behavior. Notably, the springs can be quite weak and thus efficient to simulate because they are structurally supported by the kinematic mesh. If independent simulation of individual hairs or guide hairs is desired, they too benefit from being anchored to the kinematic mesh dramatically increasing efficiency as weak springs can be used while still supporting interesting and dramatic hairstyles. Furthermore, we explain how to embed these dynamic simulations into the kinematically deforming skinned mesh so that they can be used as part of a blendshape system where an artist can make many subsequent iterations without requiring any additional simulation. Although there are many applications for our newly proposed approach to hair animation, we mostly focus on the particularly challenging problem of hair-water interaction. 
While doing this, we discuss how porosities are stored in the kinematic mesh, how the kinematically deforming mesh can be used to apply drag and adhesion forces to the water, etc.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a novel framework for hair animation as well as hair-water interaction that supports millions of hairs. First, we develop a hair animation framework that embeds hair into a tetrahedralized volume mesh that we kinematically skin to deform and follow the exterior of an animated character. Allowing the hairs to follow their precomputed embedded locations in the kinematically deforming skinned mesh already provides visually plausible behavior. Creating a copy of the tetrahedral mesh, endowing it with springs, and attaching it to the kinematically skinned mesh creates more dynamic behavior. Notably, the springs can be quite weak and thus efficient to simulate because they are structurally supported by the kinematic mesh. If independent simulation of individual hairs or guide hairs is desired, they too benefit from being anchored to the kinematic mesh dramatically increasing efficiency as weak springs can be used while still supporting interesting and dramatic hairstyles. Furthermore, we explain how to embed these dynamic simulations into the kinematically deforming skinned mesh so that they can be used as part of a blendshape system where an artist can make many subsequent iterations without requiring any additional simulation. Although there are many applications for our newly proposed approach to hair animation, we mostly focus on the particularly challenging problem of hair-water interaction. 
While doing this, we discuss how porosities are stored in the kinematic mesh, how the kinematically deforming mesh can be used to apply drag and adhesion forces to the water, etc.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a novel framework for hair animation as well as hair-water interaction that supports millions of hairs. First, we develop a hair animation framework that embeds hair into a tetrahedralized volume mesh that we kinematically skin to deform and follow the exterior of an animated character. Allowing the hairs to follow their precomputed embedded locations in the kinematically deforming skinned mesh already provides visually plausible behavior. Creating a copy of the tetrahedral mesh, endowing it with springs, and attaching it to the kinematically skinned mesh creates more dynamic behavior. Notably, the springs can be quite weak and thus efficient to simulate because they are structurally supported by the kinematic mesh. If independent simulation of individual hairs or guide hairs is desired, they too benefit from being anchored to the kinematic mesh dramatically increasing efficiency as weak springs can be used while still supporting interesting and dramatic hairstyles. Furthermore, we explain how to embed these dynamic simulations into the kinematically deforming skinned mesh so that they can be used as part of a blendshape system where an artist can make many subsequent iterations without requiring any additional simulation. Although there are many applications for our newly proposed approach to hair animation, we mostly focus on the particularly challenging problem of hair-water interaction. 
While doing this, we discuss how porosities are stored in the kinematic mesh, how the kinematically deforming mesh can be used to apply drag and adhesion forces to the water, etc.", "title": "A Skinned Tetrahedral Mesh for Hair Animation and Hair-Water Interaction", "normalizedTitle": "A Skinned Tetrahedral Mesh for Hair Animation and Hair-Water Interaction", "fno": "08301570", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Hair", "Animation", "Kinematics", "Deformable Models", "Springs", "Skin", "Computational Modeling", "Computer Graphics", "Hair", "Skinning", "Tetrahedral Mesh", "Blendshape", "Water" ], "authors": [ { "givenName": "Minjae", "surname": "Lee", "fullName": "Minjae Lee", "affiliation": "Stanford University, Stanford, CA", "__typename": "ArticleAuthorType" }, { "givenName": "David", "surname": "Hyde", "fullName": "David Hyde", "affiliation": "Stanford University, Stanford, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Bao", "fullName": "Michael Bao", "affiliation": "Stanford University, Stanford, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Ronald", "surname": "Fedkiw", "fullName": "Ronald Fedkiw", "affiliation": "Stanford University, Stanford, CA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1449-1459", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2001/1195/0/11950186", "title": "A Design Tool for the Hierarchical Hair Model", "doi": null, "abstractUrl": "/proceedings-article/iv/2001/11950186/12OmNA2cYzp", "parentPublication": { "id": "proceedings/iv/2001/1195/0", "title": "Proceedings Fifth International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icgciot/2015/7910/0/07380559", "title": "Automatic hair color de-identification", "doi": null, "abstractUrl": "/proceedings-article/icgciot/2015/07380559/12OmNANBZo5", "parentPublication": { "id": "proceedings/icgciot/2015/7910/0", "title": "2015 International Conference on Green Computing and Internet of Things (ICGCIoT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2000/0743/0/07430145", "title": "Fur and Hair: Practical Modeling and Rendering Techniques", "doi": null, "abstractUrl": "/proceedings-article/iv/2000/07430145/12OmNAle6kp", "parentPublication": { "id": "proceedings/iv/2000/0743/0", "title": "2000 IEEE Conference on Information Visualization. An International Conference on Computer Visualization and Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacific-graphics/2010/4205/0/4205a085", "title": "A Simplified Plane-Parallel Scattering Model and Its Application to Hair Rendering", "doi": null, "abstractUrl": "/proceedings-article/pacific-graphics/2010/4205a085/12OmNBzRNtL", "parentPublication": { "id": "proceedings/pacific-graphics/2010/4205/0", "title": "Pacific Conference on Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2015/0379/0/0379a069", "title": "Automatic Segmentation of Hair in Images", "doi": null, "abstractUrl": "/proceedings-article/ism/2015/0379a069/12OmNwBT1mx", "parentPublication": { "id": "proceedings/ism/2015/0379/0", "title": "2015 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2009/3963/0/3963a185", "title": "Procedural Hair Generation", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2009/3963a185/12OmNzgwmRo", "parentPublication": { "id": 
"proceedings/sbgames/2009/3963/0", "title": "2009 VIII Brazilian Symposium on Games and Digital Entertainment", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/03/06910280", "title": "2.5D Cartoon Hair Modeling and Manipulation", "doi": null, "abstractUrl": "/journal/tg/2015/03/06910280/13rRUIJuxpC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2001/03/mcg2001030036", "title": "V-HairStudio: An Interactive Tool for Hair Design", "doi": null, "abstractUrl": "/magazine/cg/2001/03/mcg2001030036/13rRUwInvDe", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/07/07448467", "title": "Adaptive Skinning for Interactive Hair-Solid Simulation", "doi": null, "abstractUrl": "/journal/tg/2017/07/07448467/13rRUygBw7e", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/04/09220808", "title": "Real-Time Hair Simulation With Neural Interpolation", "doi": null, "abstractUrl": "/journal/tg/2022/04/09220808/1nRLElyFvfG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08629339", "articleId": "17D45We0UD7", "__typename": "AdjacentArticleType" }, "next": { "fno": "08303701", "articleId": "17D45W2WyxH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45W2WyxH", "doi": "10.1109/TVCG.2018.2810068", "abstract": "Analyzing depressions plays an important role in meteorology, especially in the study of cyclones. In particular, the study of the temporal evolution of cyclones requires a robust depression tracking framework. To cope with this demand we propose a pipeline for the exploration of cyclones and their temporal evolution. This entails a generic framework for their identification and tracking. The fact that depressions and cyclones are not well-defined objects and their shape and size characteristics change over time makes this task especially challenging. Our method combines the robustness of topological approaches and the detailed tracking information from optical flow analysis. At first cyclones are identified within each time step based on well-established topological concepts. Then candidate tracks are computed from an optical flow field. These tracks are clustered within a moving time window to distill dominant coherent cyclone movements, which are then forwarded to a final tracking step. In contrast to previous methods our method requires only a few intuitive parameters. An integration into an exploratory framework helps in the study of cyclone movement by identifying smooth, representative tracks. Multiple case studies demonstrate the effectiveness of the method in tracking cyclones, both in the northern and southern hemisphere.", "abstracts": [ { "abstractType": "Regular", "content": "Analyzing depressions plays an important role in meteorology, especially in the study of cyclones. In particular, the study of the temporal evolution of cyclones requires a robust depression tracking framework. 
To cope with this demand we propose a pipeline for the exploration of cyclones and their temporal evolution. This entails a generic framework for their identification and tracking. The fact that depressions and cyclones are not well-defined objects and their shape and size characteristics change over time makes this task especially challenging. Our method combines the robustness of topological approaches and the detailed tracking information from optical flow analysis. At first cyclones are identified within each time step based on well-established topological concepts. Then candidate tracks are computed from an optical flow field. These tracks are clustered within a moving time window to distill dominant coherent cyclone movements, which are then forwarded to a final tracking step. In contrast to previous methods our method requires only a few intuitive parameters. An integration into an exploratory framework helps in the study of cyclone movement by identifying smooth, representative tracks. Multiple case studies demonstrate the effectiveness of the method in tracking cyclones, both in the northern and southern hemisphere.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Analyzing depressions plays an important role in meteorology, especially in the study of cyclones. In particular, the study of the temporal evolution of cyclones requires a robust depression tracking framework. To cope with this demand we propose a pipeline for the exploration of cyclones and their temporal evolution. This entails a generic framework for their identification and tracking. The fact that depressions and cyclones are not well-defined objects and their shape and size characteristics change over time makes this task especially challenging. Our method combines the robustness of topological approaches and the detailed tracking information from optical flow analysis. At first cyclones are identified within each time step based on well-established topological concepts. 
Then candidate tracks are computed from an optical flow field. These tracks are clustered within a moving time window to distill dominant coherent cyclone movements, which are then forwarded to a final tracking step. In contrast to previous methods our method requires only a few intuitive parameters. An integration into an exploratory framework helps in the study of cyclone movement by identifying smooth, representative tracks. Multiple case studies demonstrate the effectiveness of the method in tracking cyclones, both in the northern and southern hemisphere.", "title": "An Exploratory Framework for Cyclone Identification and Tracking", "normalizedTitle": "An Exploratory Framework for Cyclone Identification and Tracking", "fno": "08303701", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Atmospheric Movements", "Image Sequences", "Object Tracking", "Storms", "Topology", "Candidate Tracks", "Dominant Coherent Cyclone Movements", "Final Tracking Step", "Exploratory Framework", "Cyclone Movement", "Smooth Tracks", "Cyclone Identification", "Temporal Evolution", "Robust Depression Tracking Framework", "Generic Framework", "Detailed Tracking Information", "Depression Analysis", "Cyclones", "Tracking", "Meteorology", "Object Tracking", "Image Sequences", "Storms", "Cyclone", "Scalar Field", "Time Varying Data", "Track Graph", "Spatio Temporal Clustering", "Tracking" ], "authors": [ { "givenName": "Akash Anil", "surname": "Valsangkar", "fullName": "Akash Anil Valsangkar", "affiliation": "Department of Computer Science and Automation, Indian Institute of Science, Bangalore, India", "__typename": "ArticleAuthorType" }, { "givenName": "Joy Merwin", "surname": "Monteiro", "fullName": "Joy Merwin Monteiro", "affiliation": "Department of Meteorology, Stockholm University, Stockholm, Sweden", "__typename": "ArticleAuthorType" }, { "givenName": "Vidya", "surname": "Narayanan", "fullName": "Vidya Narayanan", "affiliation": "Computer Science Department, Carnegie Mellon University, 
Pittsburgh, PA", "__typename": "ArticleAuthorType" }, { "givenName": "Ingrid", "surname": "Hotz", "fullName": "Ingrid Hotz", "affiliation": "Department of Science and Technology, Linkoping University, Linkoping, Sweden", "__typename": "ArticleAuthorType" }, { "givenName": "Vijay", "surname": "Natarajan", "fullName": "Vijay Natarajan", "affiliation": "Department of Computer Science and Automation, Indian Institute of Science, Bangalore, India", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1460-1473", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/crv/2013/4983/0/4983a273", "title": "Tracking Severe Storms Using a Pseudo Storm Concept", "doi": null, "abstractUrl": "/proceedings-article/crv/2013/4983a273/12OmNBSjISk", "parentPublication": { "id": "proceedings/crv/2013/4983/0", "title": "2013 International Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/apwc-on-cse/2014/1955/0/07053845", "title": "An expert system to assess the landfall propensity of a tropical cyclone in Australia", "doi": null, "abstractUrl": "/proceedings-article/apwc-on-cse/2014/07053845/12OmNvpew9D", "parentPublication": { "id": "proceedings/apwc-on-cse/2014/1955/0", "title": "2014 Asia-Pacific World Congress on Computer Science and Engineering (APWC on CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2013/5016/0/5016a641", "title": "Grey Correlation Analysis of Tropical Cyclone Landing Time", "doi": null, "abstractUrl": "/proceedings-article/icdma/2013/5016a641/12OmNwBT1ml", "parentPublication": { "id": "proceedings/icdma/2013/5016/0", "title": "2013 Fourth 
International Conference on Digital Manufacturing & Automation (ICDMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcmp-ugc/2010/986/0/06018005", "title": "Tropical Cyclone Track and Intensity Predictability", "doi": null, "abstractUrl": "/proceedings-article/hpcmp-ugc/2010/06018005/12OmNxEjY4p", "parentPublication": { "id": "proceedings/hpcmp-ugc/2010/986/0", "title": "2010 DoD High Performance Computing Modernization Program Users Group Conference (HPCMP-UGC 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122896", "title": "An Exploration Framework to Identify and Track Movement of Cloud Systems", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122896/13rRUwvT9gt", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2013/05/mcs2013050056", "title": "Improving NASA's Multiscale Modeling Framework for Tropical Cyclone Climate Study", "doi": null, "abstractUrl": "/magazine/cs/2013/05/mcs2013050056/13rRUx0geji", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440837", "title": "Visualizing Uncertain Tropical Cyclone Predictions using Representative Samples from Ensembles of Forecast Tracks", "doi": null, "abstractUrl": "/journal/tg/2019/01/08440837/17D45XeKgnt", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/topoinvis/2022/9354/0/935400a092", "title": "Exploring Cyclone Evolution with Hierarchical Features", "doi": null, "abstractUrl": 
"/proceedings-article/topoinvis/2022/935400a092/1J2XMSlesRq", "parentPublication": { "id": "proceedings/topoinvis/2022/9354/0", "title": "2022 Topological Data Analysis and Visualization (TopoInVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hipc/2022/9423/0/942300a166", "title": "A Deep Learning-Based In Situ Analysis Framework for Tropical Cyclogenesis Prediction", "doi": null, "abstractUrl": "/proceedings-article/hipc/2022/942300a166/1MEXhBAeDyU", "parentPublication": { "id": "proceedings/hipc/2022/9423/0", "title": "2022 IEEE 29th International Conference on High Performance Computing, Data, and Analytics (HiPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icecem/2021/1025/0/102500a141", "title": "Analytic Hierarchy Process (AHP) to analyze the tropical cyclone risk index of 15 coastal cities in China", "doi": null, "abstractUrl": "/proceedings-article/icecem/2021/102500a141/1zpEXbFBss0", "parentPublication": { "id": "proceedings/icecem/2021/1025/0", "title": "2021 International Conference on E-Commerce and E-Management (ICECEM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08301570", "articleId": "17D45W9KVHk", "__typename": "AdjacentArticleType" }, "next": { "fno": "08305493", "articleId": "17D45WaTkmp", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgBj", "name": "ttg201903-08303701s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201903-08303701s1.zip", "extension": "zip", "size": "6.67 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WaTkmp", "doi": "10.1109/TVCG.2018.2810918", "abstract": "Recent visualization research efforts have incorporated experimental techniques and perceptual models from the vision science community. Perceptual laws such as Weber's law, for example, have been used to model the perception of correlation in scatterplots. While this thread of research has progressively refined the modeling of the perception of correlation in scatterplots, it remains unclear as to why such perception can be modeled using relatively simple functions, e.g., linear and log-linear. In this paper, we investigate a longstanding hypothesis that people use visual features in a chart as a proxy for statistical measures like correlation. For a given scatterplot, we extract 49 candidate visual features and evaluate which best align with existing models and participant judgments. The results support the hypothesis that people attend to a small number of visual features when discriminating correlation in scatterplots. We discuss how this result may account for prior conflicting findings, and how visual features provide a baseline for future model-based approaches in visualization evaluation and design.", "abstracts": [ { "abstractType": "Regular", "content": "Recent visualization research efforts have incorporated experimental techniques and perceptual models from the vision science community. Perceptual laws such as Weber's law, for example, have been used to model the perception of correlation in scatterplots. 
While this thread of research has progressively refined the modeling of the perception of correlation in scatterplots, it remains unclear as to why such perception can be modeled using relatively simple functions, e.g., linear and log-linear. In this paper, we investigate a longstanding hypothesis that people use visual features in a chart as a proxy for statistical measures like correlation. For a given scatterplot, we extract 49 candidate visual features and evaluate which best align with existing models and participant judgments. The results support the hypothesis that people attend to a small number of visual features when discriminating correlation in scatterplots. We discuss how this result may account for prior conflicting findings, and how visual features provide a baseline for future model-based approaches in visualization evaluation and design.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent visualization research efforts have incorporated experimental techniques and perceptual models from the vision science community. Perceptual laws such as Weber's law, for example, have been used to model the perception of correlation in scatterplots. While this thread of research has progressively refined the modeling of the perception of correlation in scatterplots, it remains unclear as to why such perception can be modeled using relatively simple functions, e.g., linear and log-linear. In this paper, we investigate a longstanding hypothesis that people use visual features in a chart as a proxy for statistical measures like correlation. For a given scatterplot, we extract 49 candidate visual features and evaluate which best align with existing models and participant judgments. The results support the hypothesis that people attend to a small number of visual features when discriminating correlation in scatterplots. 
We discuss how this result may account for prior conflicting findings, and how visual features provide a baseline for future model-based approaches in visualization evaluation and design.", "title": "Correlation Judgment and Visualization Features: A Comparative Study", "normalizedTitle": "Correlation Judgment and Visualization Features: A Comparative Study", "fno": "08305493", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Feature Extraction", "Visual Perception", "Correlation Judgment", "Visualization Features", "Perceptual Models", "Vision Science Community", "Perceptual Laws", "Webers Law", "Scatterplots", "Visualization Evaluation", "Model Based Approaches", "Candidate Visual Features", "Correlation", "Visualization", "Data Visualization", "Psychology", "Feature Extraction", "Data Models", "Computational Modeling", "Information Visualization", "Perception And Psychophysics", "Evaluation Methodology", "Webers Law", "Power Law" ], "authors": [ { "givenName": "Fumeng", "surname": "Yang", "fullName": "Fumeng Yang", "affiliation": "Department of Computer Science, Brown University, Providence, RI", "__typename": "ArticleAuthorType" }, { "givenName": "Lane T.", "surname": "Harrison", "fullName": "Lane T. Harrison", "affiliation": "Department of Computer Science, Worcester Polytechnic Institute, Worcester, MA", "__typename": "ArticleAuthorType" }, { "givenName": "Ronald A.", "surname": "Rensink", "fullName": "Ronald A. Rensink", "affiliation": "Departments of Computer Science and Psychology, University of British Columbia, Vancouver, BC, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Steven L.", "surname": "Franconeri", "fullName": "Steven L. 
Franconeri", "affiliation": "Psychology Department, Northwestern University, Evanston, IL", "__typename": "ArticleAuthorType" }, { "givenName": "Remco", "surname": "Chang", "fullName": "Remco Chang", "affiliation": "Department of Computer Science, Tufts University, Medford, MA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1474-1488", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2010/6685/0/05429604", "title": "A model of symbol lightness discrimination in sparse scatterplots", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2010/05429604/12OmNBSSVnf", "parentPublication": { "id": "proceedings/pacificvis/2010/6685/0", "title": "2010 IEEE Pacific Visualization Symposium (PacificVis 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2017/6549/0/07966736", "title": "Correlation-Based Background Music Recommendation by Incorporating Temporal Sequence of Local Features", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2017/07966736/12OmNxvwp0d", "parentPublication": { "id": "proceedings/bigmm/2017/6549/0", "title": "2017 IEEE Third International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2015/9711/0/5720a621", "title": "Convolutional Features for Correlation Filter Based Visual Tracking", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a621/12OmNzT7Oww", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2018/01/08022891", "title": "Priming and Anchoring Effects in Visualization", "doi": null, "abstractUrl": "/journal/tg/2018/01/08022891/13rRUwbaqLz", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/03/08490694", "title": "ScatterNet: A Deep Subjective Similarity Model for Visual Analysis of Scatterplots", "doi": null, "abstractUrl": "/journal/tg/2020/03/08490694/14jQfPkRijD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600i741", "title": "Correlation-Aware Deep Tracking", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600i741/1H1nc0OVnfG", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08794768", "title": "Evaluating Perceptual Bias During Geometric Scaling of Scatterplots", "doi": null, "abstractUrl": "/journal/tg/2020/01/08794768/1cr2ZlCC2xG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/02/09039622", "title": "Fusing of Electroencephalogram and Eye Movement With Group Sparse Canonical Correlation Analysis for Anxiety Detection", "doi": null, "abstractUrl": "/journal/ta/2022/02/09039622/1igRZVW87EQ", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2022/04/09195155", "title": "Words of Estimative Correlation: Studying Verbalizations of Scatterplots", "doi": null, "abstractUrl": "/journal/tg/2022/04/09195155/1n2jl9RTLBm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a038", "title": "Why Two Y-Axes (Y2Y): A Case Study for Visual Correlation with Dual Axes", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a038/1rSR97vRDy0", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08303701", "articleId": "17D45W2WyxH", "__typename": "AdjacentArticleType" }, "next": { "fno": "08314702", "articleId": "17D45VUZMUW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgFo", "name": "ttg201903-08305493s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201903-08305493s1.zip", "extension": "zip", "size": "5.04 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45VUZMUW", "doi": "10.1109/TVCG.2018.2814987", "abstract": "In Computer-Aided Design (CAD), Non-Uniform Rational B-Splines (NURBS) are a common model representation for export, simulation and visualization. In this paper, we present a direct rendering method for trimmed NURBS models based on their parametric description. Our approach builds on a novel trimming method and a three-pass pipeline which both allow for a sub-pixel precise visualization. The rendering pipeline bypasses tessellation limitations of current hardware using a feedback mechanism. In contrast to existing work, our trimming method scales well with a large number of trim curves and estimates the trimmed surface's footprint in screen-space which allows for an anti-aliasing with minimal performance overhead. Fragments with trimmed edges are routed into a designated off-screen buffer for subsequent blending with background faces. The evaluation of the presented algorithms shows that our rendering system can handle CAD models with ten thousands of trimmed NURBS surfaces. The suggested two-level data structure used for trimming outperforms state-of-the-art methods while being more precise and memory efficient. Our curve coverage estimation used for anti-aliasing provides an efficient trade-off between quality and performance compared to multisampling or screen-space anti-aliasing approaches.", "abstracts": [ { "abstractType": "Regular", "content": "In Computer-Aided Design (CAD), Non-Uniform Rational B-Splines (NURBS) are a common model representation for export, simulation and visualization. 
In this paper, we present a direct rendering method for trimmed NURBS models based on their parametric description. Our approach builds on a novel trimming method and a three-pass pipeline which both allow for a sub-pixel precise visualization. The rendering pipeline bypasses tessellation limitations of current hardware using a feedback mechanism. In contrast to existing work, our trimming method scales well with a large number of trim curves and estimates the trimmed surface's footprint in screen-space which allows for an anti-aliasing with minimal performance overhead. Fragments with trimmed edges are routed into a designated off-screen buffer for subsequent blending with background faces. The evaluation of the presented algorithms shows that our rendering system can handle CAD models with ten thousands of trimmed NURBS surfaces. The suggested two-level data structure used for trimming outperforms state-of-the-art methods while being more precise and memory efficient. Our curve coverage estimation used for anti-aliasing provides an efficient trade-off between quality and performance compared to multisampling or screen-space anti-aliasing approaches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In Computer-Aided Design (CAD), Non-Uniform Rational B-Splines (NURBS) are a common model representation for export, simulation and visualization. In this paper, we present a direct rendering method for trimmed NURBS models based on their parametric description. Our approach builds on a novel trimming method and a three-pass pipeline which both allow for a sub-pixel precise visualization. The rendering pipeline bypasses tessellation limitations of current hardware using a feedback mechanism. In contrast to existing work, our trimming method scales well with a large number of trim curves and estimates the trimmed surface's footprint in screen-space which allows for an anti-aliasing with minimal performance overhead. 
Fragments with trimmed edges are routed into a designated off-screen buffer for subsequent blending with background faces. The evaluation of the presented algorithms shows that our rendering system can handle CAD models with ten thousands of trimmed NURBS surfaces. The suggested two-level data structure used for trimming outperforms state-of-the-art methods while being more precise and memory efficient. Our curve coverage estimation used for anti-aliasing provides an efficient trade-off between quality and performance compared to multisampling or screen-space anti-aliasing approaches.", "title": "Efficient and Anti-Aliased Trimming for Rendering Large NURBS Models", "normalizedTitle": "Efficient and Anti-Aliased Trimming for Rendering Large NURBS Models", "fno": "08314702", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Solid Modeling", "Rendering Computer Graphics", "Splines Mathematics", "Surface Topography", "Surface Reconstruction", "Computational Modeling", "Hardware", "Trimming", "NURBS", "Anti Aliasing", "Adaptive Tessellation" ], "authors": [ { "givenName": "Andre", "surname": "Schollmeyer", "fullName": "Andre Schollmeyer", "affiliation": "Faculty of Media, Bauhaus-Universitaet Weimar, Weimar, Thuringia, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Bernd", "surname": "Froehlich", "fullName": "Bernd Froehlich", "affiliation": "Faculty of Media, Bauhaus-Universitaet Weimar, Weimar, Thuringia, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1489-1498", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/rt/2006/0693/0/04061557", "title": "Ray Casting of Trimmed NURBS Surfaces on the GPU", "doi": null, "abstractUrl": 
"/proceedings-article/rt/2006/04061557/12OmNBNM8TN", "parentPublication": { "id": "proceedings/rt/2006/0693/0", "title": "IEEE Symposium on Interactive Ray Tracing 2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmp/2004/2078/0/20780056", "title": "Smooth Trimmed NURBS Surface Connection with Tension Control", "doi": null, "abstractUrl": "/proceedings-article/gmp/2004/20780056/12OmNBiygAO", "parentPublication": { "id": "proceedings/gmp/2004/2078/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2017/3013/0/3013a802", "title": "A Method for Calculation of Hydrodynamic Coefficients Based on NURBS", "doi": null, "abstractUrl": "/proceedings-article/icisce/2017/3013a802/12OmNBzRNpF", "parentPublication": { "id": "proceedings/icisce/2017/3013/0", "title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsa/2008/3243/0/3243a514", "title": "NURBS Fusion", "doi": null, "abstractUrl": "/proceedings-article/iccsa/2008/3243a514/12OmNqIhFXy", "parentPublication": { "id": "proceedings/iccsa/2008/3243/0", "title": "2008 International Conference on Computational Sciences and Its Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/maee/2013/4975/0/4975a080", "title": "Research on a New Linear Interpolation Algorithm of NURBS Curve", "doi": null, "abstractUrl": "/proceedings-article/maee/2013/4975a080/12OmNxwWoum", "parentPublication": { "id": "proceedings/maee/2013/4975/0", "title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/09/06846294", "title": "Direct Isosurface Ray 
Casting of NURBS-Based Isogeometric Analysis", "doi": null, "abstractUrl": "/journal/tg/2014/09/06846294/13rRUwvT9gu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2018/5500/0/550000a073", "title": "A Note on the Convergence of NURBS Curves When Weights Approach Infinity", "doi": null, "abstractUrl": "/proceedings-article/icisce/2018/550000a073/17D45WXIkzJ", "parentPublication": { "id": "proceedings/icisce/2018/5500/0", "title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2018/06/07888512", "title": "Immunological Approach for Full NURBS Reconstruction of Outline Curves from Noisy Data Points in Medical Imaging", "doi": null, "abstractUrl": "/journal/tb/2018/06/07888512/17D45WZZ7FE", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcce/2018/8481/0/848100a097", "title": "Linear Motor Platform Contouring Control Based on NURBS Curve Interpolation", "doi": null, "abstractUrl": "/proceedings-article/icmcce/2018/848100a097/17D45XwUAKx", "parentPublication": { "id": "proceedings/icmcce/2018/8481/0", "title": "2018 3rd International Conference on Mechanical, Control and Computer Engineering (ICMCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcmeim/2019/5045/0/504500a645", "title": "Research on Adaptive Feedrate Planning of NURBS Curves for CNC System", "doi": null, "abstractUrl": "/proceedings-article/wcmeim/2019/504500a645/1hHLnPQuEG4", "parentPublication": { "id": "proceedings/wcmeim/2019/5045/0", "title": "2019 2nd World 
Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08305493", "articleId": "17D45WaTkmp", "__typename": "AdjacentArticleType" }, "next": { "fno": "08305502", "articleId": "17D45WWzW55", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgx1", "name": "ttg201903-08314702s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201903-08314702s1.zip", "extension": "zip", "size": "255 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WWzW55", "doi": "10.1109/TVCG.2018.2810919", "abstract": "Ensemble simulations are used in climate research to account for natural variability. For medium-term decadal predictions, each simulation run is initialized with real observations from a different day resulting in a set of possible climatic futures. Understanding the variability and the predictive power in this wealth of data is still a challenging task. In this paper, we introduce a visual analytics system to explore variability within ensembles of decadal climate predictions. We propose a new interactive visualization technique (clustering timeline) based on the Sankey diagram, which conveys a concise summary of data similarity and its changes over time. We augment the system with two additional visualizations, filled contour maps and heatmaps, to provide analysts with additional information relating the new diagram to raw data and automatic clustering results. The usefulness of the technique is demonstrated by case studies and user interviews.", "abstracts": [ { "abstractType": "Regular", "content": "Ensemble simulations are used in climate research to account for natural variability. For medium-term decadal predictions, each simulation run is initialized with real observations from a different day resulting in a set of possible climatic futures. Understanding the variability and the predictive power in this wealth of data is still a challenging task. In this paper, we introduce a visual analytics system to explore variability within ensembles of decadal climate predictions. 
We propose a new interactive visualization technique (clustering timeline) based on the Sankey diagram, which conveys a concise summary of data similarity and its changes over time. We augment the system with two additional visualizations, filled contour maps and heatmaps, to provide analysts with additional information relating the new diagram to raw data and automatic clustering results. The usefulness of the technique is demonstrated by case studies and user interviews.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Ensemble simulations are used in climate research to account for natural variability. For medium-term decadal predictions, each simulation run is initialized with real observations from a different day resulting in a set of possible climatic futures. Understanding the variability and the predictive power in this wealth of data is still a challenging task. In this paper, we introduce a visual analytics system to explore variability within ensembles of decadal climate predictions. We propose a new interactive visualization technique (clustering timeline) based on the Sankey diagram, which conveys a concise summary of data similarity and its changes over time. We augment the system with two additional visualizations, filled contour maps and heatmaps, to provide analysts with additional information relating the new diagram to raw data and automatic clustering results. 
The usefulness of the technique is demonstrated by case studies and user interviews.", "title": "Exploring Variability within Ensembles of Decadal Climate Predictions", "normalizedTitle": "Exploring Variability within Ensembles of Decadal Climate Predictions", "fno": "08305502", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Meteorology", "Atmospheric Modeling", "Data Models", "Computational Modeling", "Analytical Models", "Predictive Models", "Clustering", "Ensemble Simulations", "Climate Research", "Visual Analysis" ], "authors": [ { "givenName": "Christopher P.", "surname": "Kappe", "fullName": "Christopher P. Kappe", "affiliation": "Department of Computer Science, TU Kaiserslautern, Kaiserslautern, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Böttinger", "fullName": "Michael Böttinger", "affiliation": "Deutsches Klimarechenzentrum GmbH, Hamburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Heike", "surname": "Leitte", "fullName": "Heike Leitte", "affiliation": "Department of Computer Science, TU Kaiserslautern, Kaiserslautern, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1499-1512", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/e-science/2015/9325/0/9325a108", "title": "From HPC Performance to Climate Modeling: Transforming Methods for HPC Predictions into Models of Extreme Climate Conditions", "doi": null, "abstractUrl": "/proceedings-article/e-science/2015/9325a108/12OmNB06l60", "parentPublication": { "id": "proceedings/e-science/2015/9325/0", "title": "2015 IEEE 11th International Conference on e-Science (e-Science)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/big-data/2016/9005/0/07841098", "title": "HPC infrastructure to support the next-generation ARM facility data operations", "doi": null, "abstractUrl": "/proceedings-article/big-data/2016/07841098/12OmNvk7JMZ", "parentPublication": { "id": "proceedings/big-data/2016/9005/0", "title": "2016 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/2011/06/mso2011060043", "title": "Managing Software Complexity and Variability in Coupled Climate Models", "doi": null, "abstractUrl": "/magazine/so/2011/06/mso2011060043/13rRUwghd7n", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2015/05/mcs2015050049", "title": "Putting Regional Climate Prediction in Reach", "doi": null, "abstractUrl": "/magazine/cs/2015/05/mcs2015050049/13rRUy0ZzWh", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2015/06/mcs2015060043", "title": "Can Topic Modeling Shed Light on Climate Extremes?", "doi": null, "abstractUrl": "/magazine/cs/2015/06/mcs2015060043/13rRUyYBlcf", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2015/06/mcs2015060009", "title": "Climate Computing: The State of Play", "doi": null, "abstractUrl": "/magazine/cs/2015/06/mcs2015060009/13rRUyZaxu4", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2018/9288/0/928800a758", "title": "Extreme Values from Spatiotemporal Chaos: Precipitation Extremes and Climate Variability", 
"doi": null, "abstractUrl": "/proceedings-article/icdmw/2018/928800a758/18jXFBH7KI8", "parentPublication": { "id": "proceedings/icdmw/2018/9288/0", "title": "2018 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/escience/2019/2451/0/245100a498", "title": "ESiWACE: On European Infrastructure Efforts for Weather and Climate Modeling at Exascale", "doi": null, "abstractUrl": "/proceedings-article/escience/2019/245100a498/1ike1oTsoZa", "parentPublication": { "id": "proceedings/escience/2019/2451/0", "title": "2019 15th International Conference on eScience (eScience)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/01/09281098", "title": "Dynamic 3-D Visualization of Climate Model Development and Results", "doi": null, "abstractUrl": "/magazine/cg/2021/01/09281098/1phO0N1Fhte", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2021/06/09573307", "title": "On Preserving Scientific Integrity for Climate Model Data in the HPC Era", "doi": null, "abstractUrl": "/magazine/cs/2021/06/09573307/1xH5FqO6mtO", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08314702", "articleId": "17D45VUZMUW", "__typename": "AdjacentArticleType" }, "next": { "fno": "08283576", "articleId": "17D45XcttjZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45XcttjZ", "doi": "10.1109/TVCG.2018.2802926", "abstract": "The increasing interest for reliable generation of large scale scenes and objects has facilitated several real-time applications. Although the resolution of the new generation geometry scanners are constantly improving, the output models, are inevitably noisy, requiring sophisticated approaches that remove noise while preserving sharp features. Moreover, we no longer deal exclusively with individual shapes, but with entire scenes resulting in a sequence of 3D surfaces that are affected by noise with different characteristics due to variable environmental factors (e.g., lighting conditions, orientation of the scanning device). In this work, we introduce a novel coarse-to-fine graph spectral processing approach that exploits the fact that the sharp features reside in a low dimensional structure hidden in the noisy 3D dataset. In the coarse step, the mesh is processed in parts, using a model based Bayesian learning method that identifies the noise level in each part and the subspace where the features lie. In the feature-aware fine step, we iteratively smooth face normals and vertices, while preserving geometric features. Extensive evaluation studies carried out under a broad set of complex noise patterns verify the superiority of our approach as compared to the state-of-the-art schemes, in terms of reconstruction quality and computational complexity.", "abstracts": [ { "abstractType": "Regular", "content": "The increasing interest for reliable generation of large scale scenes and objects has facilitated several real-time applications. 
Although the resolution of the new generation geometry scanners are constantly improving, the output models, are inevitably noisy, requiring sophisticated approaches that remove noise while preserving sharp features. Moreover, we no longer deal exclusively with individual shapes, but with entire scenes resulting in a sequence of 3D surfaces that are affected by noise with different characteristics due to variable environmental factors (e.g., lighting conditions, orientation of the scanning device). In this work, we introduce a novel coarse-to-fine graph spectral processing approach that exploits the fact that the sharp features reside in a low dimensional structure hidden in the noisy 3D dataset. In the coarse step, the mesh is processed in parts, using a model based Bayesian learning method that identifies the noise level in each part and the subspace where the features lie. In the feature-aware fine step, we iteratively smooth face normals and vertices, while preserving geometric features. Extensive evaluation studies carried out under a broad set of complex noise patterns verify the superiority of our approach as compared to the state-of-the-art schemes, in terms of reconstruction quality and computational complexity.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The increasing interest for reliable generation of large scale scenes and objects has facilitated several real-time applications. Although the resolution of the new generation geometry scanners are constantly improving, the output models, are inevitably noisy, requiring sophisticated approaches that remove noise while preserving sharp features. Moreover, we no longer deal exclusively with individual shapes, but with entire scenes resulting in a sequence of 3D surfaces that are affected by noise with different characteristics due to variable environmental factors (e.g., lighting conditions, orientation of the scanning device). 
In this work, we introduce a novel coarse-to-fine graph spectral processing approach that exploits the fact that the sharp features reside in a low dimensional structure hidden in the noisy 3D dataset. In the coarse step, the mesh is processed in parts, using a model based Bayesian learning method that identifies the noise level in each part and the subspace where the features lie. In the feature-aware fine step, we iteratively smooth face normals and vertices, while preserving geometric features. Extensive evaluation studies carried out under a broad set of complex noise patterns verify the superiority of our approach as compared to the state-of-the-art schemes, in terms of reconstruction quality and computational complexity.", "title": "Feature Preserving Mesh Denoising Based on Graph Spectral Processing", "normalizedTitle": "Feature Preserving Mesh Denoising Based on Graph Spectral Processing", "fno": "08283576", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Noise Reduction", "Three Dimensional Displays", "Feature Extraction", "Solid Modeling", "Face", "Noise Measurement", "Surface Treatment", "Spectral Smoothing", "Orthogonal Iteration", "Spectral Denoising Filtering", "Feature Extraction", "Level Noise Estimation" ], "authors": [ { "givenName": "Gerasimos", "surname": "Arvanitis", "fullName": "Gerasimos Arvanitis", "affiliation": "Department of Electrical and Computer Engineering, University of Patras, Rio, Patras, Greece", "__typename": "ArticleAuthorType" }, { "givenName": "Aris S.", "surname": "Lalos", "fullName": "Aris S. 
Lalos", "affiliation": "Department of Electrical and Computer Engineering, University of Patras, Rio, Patras, Greece", "__typename": "ArticleAuthorType" }, { "givenName": "Konstantinos", "surname": "Moustakas", "fullName": "Konstantinos Moustakas", "affiliation": "Department of Electrical and Computer Engineering, University of Patras, Rio, Patras, Greece", "__typename": "ArticleAuthorType" }, { "givenName": "Nikos", "surname": "Fakotakis", "fullName": "Nikos Fakotakis", "affiliation": "Department of Electrical and Computer Engineering, University of Patras, Rio, Patras, Greece", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1513-1527", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/sibgrapi/2010/8420/0/05720332", "title": "Mesh Denoising Using Quadric Error Metric", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2010/05720332/12OmNxecRQw", "parentPublication": { "id": "proceedings/sibgrapi/2010/8420/0", "title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a083", "title": "Robust Feature-Preserving Denoising of 3D Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a083/12OmNyRxFIQ", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/05/04276075", "title": "Fast and Effective Feature-Preserving Mesh Denoising", "doi": null, "abstractUrl": "/journal/tg/2007/05/04276075/13rRUwkxc5j", "parentPublication": { "id": "trans/tg", "title": 
"IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/08/08012522", "title": "Mesh Denoising Based on Normal Voting Tensor and Binary Optimization", "doi": null, "abstractUrl": "/journal/tg/2018/08/08012522/13rRUx0PqpA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/01/06822598", "title": "Bi-Normal Filtering for Mesh Denoising", "doi": null, "abstractUrl": "/journal/tg/2015/01/06822598/13rRUxYINff", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/06/08344461", "title": "Robust and High Fidelity Mesh Denoising", "doi": null, "abstractUrl": "/journal/tg/2019/06/08344461/13rRUxcbnHm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08434353", "title": "Mesh Denoising Guided by Patch Normal Co-Filtering via Kernel Low-Rank Recovery", "doi": null, "abstractUrl": "/journal/tg/2019/10/08434353/13rRUy2YLYE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2018/9264/0/926400a001", "title": "Adaptive Patches for Mesh Denoising", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2018/926400a001/17D45XvMce1", "parentPublication": { "id": "proceedings/sibgrapi/2018/9264/0", "title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a415", "title": "Optimal, Non-Rigid Alignment for Feature-Preserving Mesh Denoising", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a415/1ezRABID16w", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/08/09296808", "title": "Mesh Denoising With Facet Graph Convolutions", "doi": null, "abstractUrl": "/journal/tg/2022/08/09296808/1pDnJLfMBWg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08305502", "articleId": "17D45WWzW55", "__typename": "AdjacentArticleType" }, "next": { "fno": "08304678", "articleId": "17D45WaTkk5", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRXn", "name": "ttg201903-08283576s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201903-08283576s1.zip", "extension": "zip", "size": "108 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WaTkk5", "doi": "10.1109/TVCG.2017.2785271", "abstract": "In 2014, more than 10 million people in the US were affected by an ambulatory disability. Thus, gait rehabilitation is a crucial part of health care systems. The quantification of human locomotion enables clinicians to describe and analyze a patient's gait performance in detail and allows them to base clinical decisions on objective data. These assessments generate a vast amount of complex data which need to be interpreted in a short time period. We conducted a design study in cooperation with gait analysis experts to develop a novel Knowledge-Assisted Visual Analytics solution for clinical Gait analysis (KAVAGait). KAVAGait allows the clinician to store and inspect complex data derived during clinical gait analysis. The system incorporates innovative and interactive visual interface concepts, which were developed based on the needs of clinicians. Additionally, an explicit knowledge store (EKS) allows externalization and storage of implicit knowledge from clinicians. It makes this information available for others, supporting the process of data inspection and clinical decision making. We validated our system by conducting expert reviews, a user study, and a case study. Results suggest that KAVAGait is able to support a clinician during clinical practice by visualizing complex gait data and providing knowledge of other clinicians.", "abstracts": [ { "abstractType": "Regular", "content": "In 2014, more than 10 million people in the US were affected by an ambulatory disability. Thus, gait rehabilitation is a crucial part of health care systems. 
The quantification of human locomotion enables clinicians to describe and analyze a patient's gait performance in detail and allows them to base clinical decisions on objective data. These assessments generate a vast amount of complex data which need to be interpreted in a short time period. We conducted a design study in cooperation with gait analysis experts to develop a novel Knowledge-Assisted Visual Analytics solution for clinical Gait analysis (KAVAGait). KAVAGait allows the clinician to store and inspect complex data derived during clinical gait analysis. The system incorporates innovative and interactive visual interface concepts, which were developed based on the needs of clinicians. Additionally, an explicit knowledge store (EKS) allows externalization and storage of implicit knowledge from clinicians. It makes this information available for others, supporting the process of data inspection and clinical decision making. We validated our system by conducting expert reviews, a user study, and a case study. Results suggest that KAVAGait is able to support a clinician during clinical practice by visualizing complex gait data and providing knowledge of other clinicians.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In 2014, more than 10 million people in the US were affected by an ambulatory disability. Thus, gait rehabilitation is a crucial part of health care systems. The quantification of human locomotion enables clinicians to describe and analyze a patient's gait performance in detail and allows them to base clinical decisions on objective data. These assessments generate a vast amount of complex data which need to be interpreted in a short time period. We conducted a design study in cooperation with gait analysis experts to develop a novel Knowledge-Assisted Visual Analytics solution for clinical Gait analysis (KAVAGait). KAVAGait allows the clinician to store and inspect complex data derived during clinical gait analysis. 
The system incorporates innovative and interactive visual interface concepts, which were developed based on the needs of clinicians. Additionally, an explicit knowledge store (EKS) allows externalization and storage of implicit knowledge from clinicians. It makes this information available for others, supporting the process of data inspection and clinical decision making. We validated our system by conducting expert reviews, a user study, and a case study. Results suggest that KAVAGait is able to support a clinician during clinical practice by visualizing complex gait data and providing knowledge of other clinicians.", "title": "KAVAGait: Knowledge-Assisted Visual Analytics for Clinical Gait Analysis", "normalizedTitle": "KAVAGait: Knowledge-Assisted Visual Analytics for Clinical Gait Analysis", "fno": "08304678", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Decision Making", "Gait Analysis", "Health Care", "Patient Rehabilitation", "Clinical Gait Analysis", "KAVA Gait", "Explicit Knowledge Store", "Data Inspection", "Clinical Decision Making", "Complex Gait Data", "Gait Rehabilitation", "Health Care Systems", "Patient Rehabilitation", "Knowledge Assisted Visual Analytics", "Ambulatory Disability", "Human Locomotion", "Data Visualization", "Visual Analytics", "Time Series Analysis", "Decision Making", "Tools", "Task Analysis", "Design Study", "Interface Design", "Knowledge Generation", "Knowledge Assisted", "Visualization", "Visual Analytics", "Gait Analysis" ], "authors": [ { "givenName": "Markus", "surname": "Wagner", "fullName": "Markus Wagner", "affiliation": "St. Pölten University of Applied Sciences, St. Pölten, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Djordje", "surname": "Slijepcevic", "fullName": "Djordje Slijepcevic", "affiliation": "St. Pölten University of Applied Sciences, St. 
Pölten, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Brian", "surname": "Horsak", "fullName": "Brian Horsak", "affiliation": "St. Pölten University of Applied Sciences, St. Pölten, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Alexander", "surname": "Rind", "fullName": "Alexander Rind", "affiliation": "St. Pölten University of Applied Sciences, St. Pölten, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Matthias", "surname": "Zeppelzauer", "fullName": "Matthias Zeppelzauer", "affiliation": "St. Pölten University of Applied Sciences, St. Pölten, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Wolfgang", "surname": "Aigner", "fullName": "Wolfgang Aigner", "affiliation": "St. Pölten University of Applied Sciences, St. Pölten, Austria", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1528-1542", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ichi/2014/5701/0/5701a202", "title": "Clinical Pathway Support System", "doi": null, "abstractUrl": "/proceedings-article/ichi/2014/5701a202/12OmNA0vnXS", "parentPublication": { "id": "proceedings/ichi/2014/5701/0", "title": "2014 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vahc/2017/3187/0/08387499", "title": "Visual analytics for evaluating clinical pathways", "doi": null, "abstractUrl": "/proceedings-article/vahc/2017/08387499/12OmNAle6wG", "parentPublication": { "id": "proceedings/vahc/2017/3187/0", "title": "2017 IEEE Workshop on Visual Analytics in Healthcare (VAHC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/itme/2015/8302/0/8302a018", "title": "A Knowledge Base Driven Clinical Pharmacist Information System", "doi": null, "abstractUrl": "/proceedings-article/itme/2015/8302a018/12OmNqBbI03", "parentPublication": { "id": "proceedings/itme/2015/8302/0", "title": "2015 7th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2012/4771/0/4771a291", "title": "Visualizing Clinical Trial Data Using Pluggable Components", "doi": null, "abstractUrl": "/proceedings-article/iv/2012/4771a291/12OmNqJ8tmY", "parentPublication": { "id": "proceedings/iv/2012/4771/0", "title": "2012 16th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892c416", "title": "Visual Analytics for Public Health: Supporting Knowledge Construction and Decision-Making", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892c416/12OmNrJiCNq", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09939115", "title": "DocFlow: A Visual Analytics System for Question-based Document Retrieval and Categorization", "doi": null, "abstractUrl": "/journal/tg/5555/01/09939115/1I1KuH1xVF6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trex/2022/9356/0/935600a008", "title": "Trustworthy Visual Analytics in Clinical Gait Analysis: A Case Study for Patients with Cerebral Palsy", "doi": null, "abstractUrl": "/proceedings-article/trex/2022/935600a008/1J9BkDHcAz6", "parentPublication": { "id": 
"proceedings/trex/2022/9356/0", "title": "2022 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2020/5382/0/09374365", "title": "Machine Learning Based Clinical Decision Support and Clinician Trust", "doi": null, "abstractUrl": "/proceedings-article/ichi/2020/09374365/1rUIXSTum4M", "parentPublication": { "id": "proceedings/ichi/2020/5382/0", "title": "2020 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413226", "title": "Video Analytics Gait Trend Measurement for Fall Prevention and Health Monitoring", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413226/1tmiei3JpHG", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09555810", "title": "VBridge: Connecting the Dots Between Features and Data to Explain Healthcare Models", "doi": null, "abstractUrl": "/journal/tg/2022/01/09555810/1xlw2uJhEXe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08283576", "articleId": "17D45XcttjZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "08307265", "articleId": "17D45Xbl4Qj", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgFC", "name": "ttg201903-08304678s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201903-08304678s1.zip", "extension": "zip", "size": "14.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45Xbl4Qj", "doi": "10.1109/TVCG.2018.2811488", "abstract": "The majority of diseases that are a significant challenge for public and individual heath are caused by a combination of hereditary and environmental factors. In this paper we introduce Lineage, a novel visual analysis tool designed to support domain experts who study such multifactorial diseases in the context of genealogies. Incorporating familial relationships between cases with other data can provide insights into shared genomic variants and shared environmental exposures that may be implicated in such diseases. We introduce a data and task abstraction, and argue that the problem of analyzing such diseases based on genealogical, clinical, and genetic data can be mapped to a multivariate graph visualization problem. The main contribution of our design study is a novel visual representation for tree-like, multivariate graphs, which we apply to genealogies and clinical data about the individuals in these families. We introduce data-driven aggregation methods to scale to multiple families. By designing the genealogy graph layout to align with a tabular view, we are able to incorporate extensive, multivariate attributes in the analysis of the genealogy without cluttering the graph. We validate our designs by conducting case studies with our domain collaborators.", "abstracts": [ { "abstractType": "Regular", "content": "The majority of diseases that are a significant challenge for public and individual heath are caused by a combination of hereditary and environmental factors. 
In this paper we introduce Lineage, a novel visual analysis tool designed to support domain experts who study such multifactorial diseases in the context of genealogies. Incorporating familial relationships between cases with other data can provide insights into shared genomic variants and shared environmental exposures that may be implicated in such diseases. We introduce a data and task abstraction, and argue that the problem of analyzing such diseases based on genealogical, clinical, and genetic data can be mapped to a multivariate graph visualization problem. The main contribution of our design study is a novel visual representation for tree-like, multivariate graphs, which we apply to genealogies and clinical data about the individuals in these families. We introduce data-driven aggregation methods to scale to multiple families. By designing the genealogy graph layout to align with a tabular view, we are able to incorporate extensive, multivariate attributes in the analysis of the genealogy without cluttering the graph. We validate our designs by conducting case studies with our domain collaborators.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The majority of diseases that are a significant challenge for public and individual heath are caused by a combination of hereditary and environmental factors. In this paper we introduce Lineage, a novel visual analysis tool designed to support domain experts who study such multifactorial diseases in the context of genealogies. Incorporating familial relationships between cases with other data can provide insights into shared genomic variants and shared environmental exposures that may be implicated in such diseases. We introduce a data and task abstraction, and argue that the problem of analyzing such diseases based on genealogical, clinical, and genetic data can be mapped to a multivariate graph visualization problem. 
The main contribution of our design study is a novel visual representation for tree-like, multivariate graphs, which we apply to genealogies and clinical data about the individuals in these families. We introduce data-driven aggregation methods to scale to multiple families. By designing the genealogy graph layout to align with a tabular view, we are able to incorporate extensive, multivariate attributes in the analysis of the genealogy without cluttering the graph. We validate our designs by conducting case studies with our domain collaborators.", "title": "Lineage: Visualizing Multivariate Clinical Data in Genealogy Graphs", "normalizedTitle": "Lineage: Visualizing Multivariate Clinical Data in Genealogy Graphs", "fno": "08307265", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Biology Computing", "Data Visualisation", "Genetics", "Genomics", "Molecular Biophysics", "Lineage", "Multivariate Clinical Data", "Genealogy Graphs", "Public Heath", "Individual Heath", "Hereditary Factors", "Environmental Factors", "Domain Experts", "Multifactorial Diseases", "Shared Genomic Variants", "Genealogical Data", "Genetic Data", "Multivariate Graph Visualization Problem", "Genealogy Graph Layout", "Visual Analysis Tool", "Clinical Data", "Visual Representation", "Familial Relationships", "Genetics", "Diseases", "Data Visualization", "Tools", "Task Analysis", "Environmental Factors", "Sociology", "Multivariate Networks", "Biology Visualization", "Genealogies", "Hereditary Genetics", "Multifactorial Diseases" ], "authors": [ { "givenName": "Carolina", "surname": "Nobre", "fullName": "Carolina Nobre", "affiliation": "University of Utah, Salt Lake City, UT", "__typename": "ArticleAuthorType" }, { "givenName": "Nils", "surname": "Gehlenborg", "fullName": "Nils Gehlenborg", "affiliation": "Harvard Medical School, Boston, MA", "__typename": "ArticleAuthorType" }, { "givenName": "Hilary", "surname": "Coon", "fullName": "Hilary Coon", "affiliation": "University of Utah, Salt Lake 
City, UT", "__typename": "ArticleAuthorType" }, { "givenName": "Alexander", "surname": "Lex", "fullName": "Alexander Lex", "affiliation": "University of Utah, Salt Lake City, UT", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1543-1558", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/bibe/2014/7502/0/7502a191", "title": "Disease-Gene Association Using a Genetic Algorithm", "doi": null, "abstractUrl": "/proceedings-article/bibe/2014/7502a191/12OmNAH5dk4", "parentPublication": { "id": "proceedings/bibe/2014/7502/0", "title": "2014 IEEE International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdew/2013/5303/0/06547449", "title": "Multivariate Data-Driven Decision Guidance for clinical scientists", "doi": null, "abstractUrl": "/proceedings-article/icdew/2013/06547449/12OmNqNG3dA", "parentPublication": { "id": "proceedings/icdew/2013/5303/0", "title": "2013 IEEE 29th International Conference on Data Engineering Workshops (ICDEW 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2013/5099/0/5099a099", "title": "Visualizing Running Races through the Multivariate Time-Series of Multiple Runners", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2013/5099a099/12OmNzJbQS2", "parentPublication": { "id": "proceedings/sibgrapi/2013/5099/0", "title": "2013 XXVI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2014/5669/0/06999382", "title": "Multifactor dimendionality reduction analysis for gene-gene 
interaction of multiple binary traits", "doi": null, "abstractUrl": "/proceedings-article/bibm/2014/06999382/12OmNzUgcZ4", "parentPublication": { "id": "proceedings/bibm/2014/5669/0", "title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2016/1611/0/07822535", "title": "Predicting microRNA-disease associations by walking on four biological networks", "doi": null, "abstractUrl": "/proceedings-article/bibm/2016/07822535/12OmNzhELhX", "parentPublication": { "id": "proceedings/bibm/2016/1611/0", "title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2008/3165/0/3165a302", "title": "BOGENVI: A Biomedical Ontology for Modelling Gene*Environment Interactions on Intermediate Phenotypes in Nutrigenomics Research", "doi": null, "abstractUrl": "/proceedings-article/cbms/2008/3165a302/12OmNzsJ7v0", "parentPublication": { "id": "proceedings/cbms/2008/3165/0", "title": "2008 21st IEEE International Symposium on Computer-Based Medical Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2018/03/07903695", "title": "A Bipartite Network and Resource Transfer-Based Approach to Infer lncRNA-Environmental Factor Associations", "doi": null, "abstractUrl": "/journal/tb/2018/03/07903695/13rRUyXKxT5", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2021/0126/0/09669487", "title": "Leveraging Integrative Knowledge Graphs to Improve Health Information Access for Rare Diseases", "doi": null, "abstractUrl": "/proceedings-article/bibm/2021/09669487/1A9VnXRByms", "parentPublication": { "id": 
"proceedings/bibm/2021/0126/0", "title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2021/04/08854121", "title": "Identify Consistent Cross-Modality Imaging Genetic Patterns via Discriminant Sparse Canonical Correlation Analysis", "doi": null, "abstractUrl": "/journal/tb/2021/04/08854121/1dM288eZYJO", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icphds/2020/8571/0/857100a360", "title": "Causes and Treatment of Adolescent Depression", "doi": null, "abstractUrl": "/proceedings-article/icphds/2020/857100a360/1rxhud7DsZO", "parentPublication": { "id": "proceedings/icphds/2020/8571/0", "title": "2020 International Conference on Public Health and Data Science (ICPHDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08304678", "articleId": "17D45WaTkk5", "__typename": "AdjacentArticleType" }, "next": { "fno": "08283638", "articleId": "17D45VTRouR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesNm", "name": "ttg201903-08307265s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201903-08307265s1.zip", "extension": "zip", "size": "1.63 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45VTRouR", "doi": "10.1109/TVCG.2018.2802945", "abstract": "Large scale shadows from buildings in a city play an important role in determining the environmental quality of public spaces. They can be both beneficial, such as for pedestrians during summer, and detrimental, by impacting vegetation and by blocking direct sunlight. Determining the effects of shadows requires the accumulation of shadows over time across different periods in a year. In this paper, we propose a simple yet efficient class of approach that uses the properties of sun movement to track the changing position of shadows within a fixed time interval. We use this approach to extend two commonly used shadow techniques, shadow maps and ray tracing, and demonstrate the efficiency of our approach. Our technique is used to develop an interactive visual analysis system, Shadow Profiler, targeted at city planners and architects that allows them to test the impact of shadows for different development scenarios. We validate the usefulness of this system through case studies set in Manhattan, a dense borough of New York City.", "abstracts": [ { "abstractType": "Regular", "content": "Large scale shadows from buildings in a city play an important role in determining the environmental quality of public spaces. They can be both beneficial, such as for pedestrians during summer, and detrimental, by impacting vegetation and by blocking direct sunlight. Determining the effects of shadows requires the accumulation of shadows over time across different periods in a year. 
In this paper, we propose a simple yet efficient class of approach that uses the properties of sun movement to track the changing position of shadows within a fixed time interval. We use this approach to extend two commonly used shadow techniques, shadow maps and ray tracing, and demonstrate the efficiency of our approach. Our technique is used to develop an interactive visual analysis system, Shadow Profiler, targeted at city planners and architects that allows them to test the impact of shadows for different development scenarios. We validate the usefulness of this system through case studies set in Manhattan, a dense borough of New York City.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Large scale shadows from buildings in a city play an important role in determining the environmental quality of public spaces. They can be both beneficial, such as for pedestrians during summer, and detrimental, by impacting vegetation and by blocking direct sunlight. Determining the effects of shadows requires the accumulation of shadows over time across different periods in a year. In this paper, we propose a simple yet efficient class of approach that uses the properties of sun movement to track the changing position of shadows within a fixed time interval. We use this approach to extend two commonly used shadow techniques, shadow maps and ray tracing, and demonstrate the efficiency of our approach. Our technique is used to develop an interactive visual analysis system, Shadow Profiler, targeted at city planners and architects that allows them to test the impact of shadows for different development scenarios. 
We validate the usefulness of this system through case studies set in Manhattan, a dense borough of New York City.", "title": "Shadow Accrual Maps: Efficient Accumulation of City-Scale Shadows Over Time", "normalizedTitle": "Shadow Accrual Maps: Efficient Accumulation of City-Scale Shadows Over Time", "fno": "08283638", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Urban Areas", "Ray Tracing", "Buildings", "Visual Analytics", "Sun", "Tools", "Shadow Accumulation", "Shadow Accrual Maps", "Visual Analysis", "Urban Development" ], "authors": [ { "givenName": "Fabio", "surname": "Miranda", "fullName": "Fabio Miranda", "affiliation": "New York University, New York, NY", "__typename": "ArticleAuthorType" }, { "givenName": "Harish", "surname": "Doraiswamy", "fullName": "Harish Doraiswamy", "affiliation": "New York University, New York, NY", "__typename": "ArticleAuthorType" }, { "givenName": "Marcos", "surname": "Lage", "fullName": "Marcos Lage", "affiliation": "Universidade Federal Fluminense, Niteroi, Rio de Janeiro, Brazil", "__typename": "ArticleAuthorType" }, { "givenName": "Luc", "surname": "Wilson", "fullName": "Luc Wilson", "affiliation": "Kohn Pedersen Fox Associates PC, New York, NY", "__typename": "ArticleAuthorType" }, { "givenName": "Mondrian", "surname": "Hsieh", "fullName": "Mondrian Hsieh", "affiliation": "Kohn Pedersen Fox Associates PC, New York, NY", "__typename": "ArticleAuthorType" }, { "givenName": "Cláudio T.", "surname": "Silva", "fullName": "Cláudio T. 
Silva", "affiliation": "New York University, New York, NY", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1559-1574", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iwcse/2009/3881/1/3881a488", "title": "A Survey of Shadow Rendering Algorithms: Projection Shadows and Shadow Volumes", "doi": null, "abstractUrl": "/proceedings-article/iwcse/2009/3881a488/12OmNBUAvYS", "parentPublication": { "id": "proceedings/iwcse/2009/3881/1", "title": "Computer Science and Engineering, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a537", "title": "Detecting and Correcting Shadows in Urban Point Clouds and Image Collections", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a537/12OmNqGA56R", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2001/1007/0/10070209", "title": "Hardware-Accelerated Rendering of Antialiased Shadows with Shadow Maps", "doi": null, "abstractUrl": "/proceedings-article/cgi/2001/10070209/12OmNwM6zXZ", "parentPublication": { "id": "proceedings/cgi/2001/1007/0", "title": "Proceedings. 
Computer Graphics International 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/est/2012/4791/0/4791a043", "title": "Finding People by their Shadows: Aerial Surveillance Using Body Biometrics Extracted from Ground Video", "doi": null, "abstractUrl": "/proceedings-article/est/2012/4791a043/12OmNwt5sjk", "parentPublication": { "id": "proceedings/est/2012/4791/0", "title": "2012 Third International Conference on Emerging Security Technologies", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/estimedia/2011/2123/0/06088525", "title": "Shadow-based vehicle model refinement and tracking in advanced automotive driver assistance systems", "doi": null, "abstractUrl": "/proceedings-article/estimedia/2011/06088525/12OmNy3147u", "parentPublication": { "id": "proceedings/estimedia/2011/2123/0", "title": "2011 9th IEEE Symposium on Embedded Systems for Real-Time Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995622", "title": "Linearity of each channel pixel values from a surface in and out of shadows and its applications", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995622/12OmNyNQSAH", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1995/7042/0/70420203", "title": "Combining color and geometry for the active, visual recognition of shadows", "doi": null, "abstractUrl": "/proceedings-article/iccv/1995/70420203/12OmNyaXPVi", "parentPublication": { "id": "proceedings/iccv/1995/7042/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200n3087", "title": "Efficient and Differentiable Shadow Computation 
for Inverse Problems", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200n3087/1BmFvUmmGGY", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798049", "title": "Shadow Inducers: Inconspicuous Highlights for Casting Virtual Shadows on OST-HMDs", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798049/1cJ0UaezhG8", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08307265", "articleId": "17D45Xbl4Qj", "__typename": "AdjacentArticleType" }, "next": { "fno": "08302598", "articleId": "17D45WaTkk4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WaTkk4", "doi": "10.1109/TVCG.2018.2808969", "abstract": "The Parallel Coordinates plot is a popular tool for the visualization of high-dimensional data. One of the main challenges when using parallel coordinates is occlusion and overplotting resulting from large data sets. Brushing is a popular approach to address these challenges. Since its conception, limited improvements have been made to brushing both in the form of visual design and functional interaction. We present a set of novel, smart brushing techniques that enhance the standard interactive brushing of a parallel coordinates plot. We introduce two new interaction concepts: Higher-order, sketch-based brushing, and smart, data-driven brushing. Higher-order brushes support interactive, flexible, n-dimensional pattern searches involving an arbitrary number of dimensions. Smart, data-driven brushing provides interactive, real-time guidance to the user during the brushing process based on derived meta-data. In addition, we implement a selection of novel enhancements and user options that complement the two techniques as well as enhance the exploration and analytical ability of the user. We demonstrate the utility and evaluate the results using a case study with a large, high-dimensional, real-world telecommunication data set and we report domain expert feedback from the data suppliers.", "abstracts": [ { "abstractType": "Regular", "content": "The Parallel Coordinates plot is a popular tool for the visualization of high-dimensional data. One of the main challenges when using parallel coordinates is occlusion and overplotting resulting from large data sets. 
Brushing is a popular approach to address these challenges. Since its conception, limited improvements have been made to brushing both in the form of visual design and functional interaction. We present a set of novel, smart brushing techniques that enhance the standard interactive brushing of a parallel coordinates plot. We introduce two new interaction concepts: Higher-order, sketch-based brushing, and smart, data-driven brushing. Higher-order brushes support interactive, flexible, n-dimensional pattern searches involving an arbitrary number of dimensions. Smart, data-driven brushing provides interactive, real-time guidance to the user during the brushing process based on derived meta-data. In addition, we implement a selection of novel enhancements and user options that complement the two techniques as well as enhance the exploration and analytical ability of the user. We demonstrate the utility and evaluate the results using a case study with a large, high-dimensional, real-world telecommunication data set and we report domain expert feedback from the data suppliers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The Parallel Coordinates plot is a popular tool for the visualization of high-dimensional data. One of the main challenges when using parallel coordinates is occlusion and overplotting resulting from large data sets. Brushing is a popular approach to address these challenges. Since its conception, limited improvements have been made to brushing both in the form of visual design and functional interaction. We present a set of novel, smart brushing techniques that enhance the standard interactive brushing of a parallel coordinates plot. We introduce two new interaction concepts: Higher-order, sketch-based brushing, and smart, data-driven brushing. Higher-order brushes support interactive, flexible, n-dimensional pattern searches involving an arbitrary number of dimensions. 
Smart, data-driven brushing provides interactive, real-time guidance to the user during the brushing process based on derived meta-data. In addition, we implement a selection of novel enhancements and user options that complement the two techniques as well as enhance the exploration and analytical ability of the user. We demonstrate the utility and evaluate the results using a case study with a large, high-dimensional, real-world telecommunication data set and we report domain expert feedback from the data suppliers.", "title": "Smart Brushing for Parallel Coordinates", "normalizedTitle": "Smart Brushing for Parallel Coordinates", "fno": "08302598", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Real World Telecommunication Data Set", "Data Suppliers", "Parallel Coordinates Plot", "High Dimensional Data", "Visual Design", "Smart Brushing Techniques", "Standard Interactive Brushing", "Interaction Concepts", "Sketch Based Brushing", "Smart Data Driven Brushing", "N Dimensional Pattern Searches", "Brushing Process", "Brushes", "Data Visualization", "Visualization", "Communications Technology", "Industries", "Tools", "Standards", "Multivariate Visualization", "Parallel Coordinates", "Call Center", "Glyph", "Brushing", "Interaction Techniques" ], "authors": [ { "givenName": "Richard C.", "surname": "Roberts", "fullName": "Richard C. Roberts", "affiliation": "Department of Computer Science, Swansea University, Swansea, Wales", "__typename": "ArticleAuthorType" }, { "givenName": "Robert S.", "surname": "Laramee", "fullName": "Robert S. Laramee", "affiliation": "Department of Computer Science, Swansea University, Swansea, Wales", "__typename": "ArticleAuthorType" }, { "givenName": "Gary A.", "surname": "Smith", "fullName": "Gary A. 
Smith", "affiliation": "QPC Ltd, Mold, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Paul", "surname": "Brookes", "fullName": "Paul Brookes", "affiliation": "QPC Ltd, Mold, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Tony", "surname": "D'Cruze", "fullName": "Tony D'Cruze", "affiliation": "QPC Ltd, Mold, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1575-1590", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/infvis/2003/8154/0/01249024", "title": "Compound brushing [dynamic data visualization]", "doi": null, "abstractUrl": "/proceedings-article/infvis/2003/01249024/12OmNvAiSFw", "parentPublication": { "id": "proceedings/infvis/2003/8154/0", "title": "IEEE Symposium on Information Visualization 2003", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2003/2055/0/01249024", "title": "Compound brushing [dynamic data visualization]", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2003/01249024/12OmNyOHG3V", "parentPublication": { "id": "proceedings/ieee-infovis/2003/2055/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmv/2004/2179/0/21790093", "title": "Parallel Coordinates for Exploring Properties of Subsets", "doi": null, "abstractUrl": "/proceedings-article/cmv/2004/21790093/12OmNzVGcAH", "parentPublication": { "id": "proceedings/cmv/2004/2179/0", "title": "Proceedings. 
Second International Conference on Coordinated & Multiple Views in Exploratory Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2002/1751/0/17510127", "title": "Angular Brushing of Extended Parallel Coordinates", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2002/17510127/12OmNzYNNf3", "parentPublication": { "id": "proceedings/ieee-infovis/2002/1751/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017621", "title": "MyBrush: Brushing and Linking with Personal Agency", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017621/13rRUxD9gXN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/06/ttg2009061001", "title": "Scattering Points in Parallel Coordinates", "doi": null, "abstractUrl": "/journal/tg/2009/06/ttg2009061001/13rRUxNW1TQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/06/07911335", "title": "Indexed-Points Parallel Coordinates Visualization of Multivariate Correlations", "doi": null, "abstractUrl": "/journal/tg/2018/06/07911335/13rRUxly9e1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2022/9007/0/900700a114", "title": "Comparative evaluation of the Scatter Plot Matrix and Parallel Coordinates Plot Matrix", "doi": null, "abstractUrl": "/proceedings-article/iv/2022/900700a114/1KaFNhzetSo", "parentPublication": { "id": "proceedings/iv/2022/9007/0", "title": "2022 
26th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2019/04/08739141", "title": "Personalized Sketch-Based Brushing in Scatterplots", "doi": null, "abstractUrl": "/magazine/cg/2019/04/08739141/1aXM9T7Z0xq", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2019/4941/0/08933632", "title": "Conditional Parallel Coordinates", "doi": null, "abstractUrl": "/proceedings-article/vis/2019/08933632/1fTgJgZx0go", "parentPublication": { "id": "proceedings/vis/2019/4941/0", "title": "2019 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08283638", "articleId": "17D45VTRouR", "__typename": "AdjacentArticleType" }, "next": { "fno": "08307258", "articleId": "17D45XacGi4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgFB", "name": "ttg201903-08302598s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201903-08302598s1.zip", "extension": "zip", "size": "41.4 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45XacGi4", "doi": "10.1109/TVCG.2018.2812879", "abstract": "In recent years, consumer-level depth cameras have been adopted for various applications. However, they often produce depth maps at only a moderately high frame rate (approximately 30 frames per second), preventing them from being used for applications such as digitizing human performance involving fast motion. On the other hand, low-cost, high-frame-rate video cameras are available. This motivates us to develop a hybrid camera that consists of a high-frame-rate video camera and a low-frame-rate depth camera and to allow temporal interpolation of depth maps with the help of auxiliary color images. To achieve this, we develop a novel algorithm that reconstructs intermediate depth maps and estimates scene flow simultaneously. We test our algorithm on various examples involving fast, non-rigid motions of single or multiple objects. Our experiments show that our scene flow estimation method is more precise than a tracking-based method and the state-of-the-art techniques.", "abstracts": [ { "abstractType": "Regular", "content": "In recent years, consumer-level depth cameras have been adopted for various applications. However, they often produce depth maps at only a moderately high frame rate (approximately 30 frames per second), preventing them from being used for applications such as digitizing human performance involving fast motion. On the other hand, low-cost, high-frame-rate video cameras are available. 
This motivates us to develop a hybrid camera that consists of a high-frame-rate video camera and a low-frame-rate depth camera and to allow temporal interpolation of depth maps with the help of auxiliary color images. To achieve this, we develop a novel algorithm that reconstructs intermediate depth maps and estimates scene flow simultaneously. We test our algorithm on various examples involving fast, non-rigid motions of single or multiple objects. Our experiments show that our scene flow estimation method is more precise than a tracking-based method and the state-of-the-art techniques.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In recent years, consumer-level depth cameras have been adopted for various applications. However, they often produce depth maps at only a moderately high frame rate (approximately 30 frames per second), preventing them from being used for applications such as digitizing human performance involving fast motion. On the other hand, low-cost, high-frame-rate video cameras are available. This motivates us to develop a hybrid camera that consists of a high-frame-rate video camera and a low-frame-rate depth camera and to allow temporal interpolation of depth maps with the help of auxiliary color images. To achieve this, we develop a novel algorithm that reconstructs intermediate depth maps and estimates scene flow simultaneously. We test our algorithm on various examples involving fast, non-rigid motions of single or multiple objects. 
Our experiments show that our scene flow estimation method is more precise than a tracking-based method and the state-of-the-art techniques.", "title": "Temporal Upsampling of Depth Maps Using a Hybrid Camera", "normalizedTitle": "Temporal Upsampling of Depth Maps Using a Hybrid Camera", "fno": "08307258", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Image Colour Analysis", "Image Motion Analysis", "Image Resolution", "Interpolation", "Video Cameras", "Auxiliary Color Images", "Scene Flow Estimation Method", "Temporal Interpolation", "Intermediate Depth Maps", "Moderately High Frame Rate", "Consumer Level Depth Cameras", "Low Frame Rate Depth Camera", "High Frame Rate Video Camera", "Cameras", "Color", "Image Reconstruction", "Estimation", "Image Resolution", "Optimization", "Tracking", "Hybrid Camera", "Scene Flow Estimation", "Depth Upsampling" ], "authors": [ { "givenName": "Ming-Ze", "surname": "Yuan", "fullName": "Ming-Ze Yuan", "affiliation": "Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lin", "surname": "Gao", "fullName": "Lin Gao", "affiliation": "Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hongbo", "surname": "Fu", "fullName": "Hongbo Fu", "affiliation": "City University of Hong Kong, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Shihong", "surname": "Xia", "fullName": "Shihong Xia", "affiliation": "Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1591-1602", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2014/4308/0/4308a738", "title": "Guided Depth Upsampling via a Cosparse Analysis Model", 
"doi": null, "abstractUrl": "/proceedings-article/cvprw/2014/4308a738/12OmNAlNiQF", "parentPublication": { "id": "proceedings/cvprw/2014/4308/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2015/9403/0/9403a133", "title": "Depth Map Super-Resolution for Cost-Effective RGB-D Camera", "doi": null, "abstractUrl": "/proceedings-article/cw/2015/9403a133/12OmNApculG", "parentPublication": { "id": "proceedings/cw/2015/9403/0", "title": "2015 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2015/8667/0/07168366", "title": "Dual Aperture Photography: Image and Depth from a Mobile Camera", "doi": null, "abstractUrl": "/proceedings-article/iccp/2015/07168366/12OmNBQkwXM", "parentPublication": { "id": "proceedings/iccp/2015/8667/0", "title": "2015 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/012P1A12", "title": "Video stabilization with a depth camera", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/012P1A12/12OmNBTawuv", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a152", "title": "Joint Example-Based Depth Map Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a152/12OmNqBbHAj", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/wacv/2018/4886/0/488601b377", "title": "Depth Map Completion by Jointly Exploiting Blurry Color Images and Sparse Depth Maps", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601b377/12OmNwwuE12", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2015/7962/0/7962a188", "title": "Simultaneously Estimation of Super-Resolution Images and Depth Maps from Low Resolution Sensors", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2015/7962a188/12OmNxwWoDb", "parentPublication": { "id": "proceedings/sibgrapi/2015/7962/0", "title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2017/2818/0/2818a080", "title": "Depth Estimation of Semi-submerged Objects Using a Light-Field Camera", "doi": null, "abstractUrl": "/proceedings-article/crv/2017/2818a080/12OmNyQ7FUK", "parentPublication": { "id": "proceedings/crv/2017/2818/0", "title": "2017 14th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2009/4442/0/05457433", "title": "3D-color video camera", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457433/12OmNznkKcx", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2016/02/mmu2016020072", "title": "Extended Guided Filtering for Depth Map Upsampling", "doi": null, "abstractUrl": "/magazine/mu/2016/02/mmu2016020072/13rRUyekJ2X", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08302598", "articleId": "17D45WaTkk4", "__typename": "AdjacentArticleType" }, "next": { "fno": "08304611", "articleId": "17D45XeKgnu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45XeKgnu", "doi": "10.1109/TVCG.2018.2810279", "abstract": "We propose a unified mathematical model for multilayer-multiframe compressive light field displays that supports both attenuation-based and polarization-based architectures. We show that the light field decomposition of such a display can be cast as a bound constrained nonlinear matrix optimization problem. Efficient light field decomposition algorithms are developed using the limited-memory BFGS (L-BFGS) method for automultiscopic displays with high resolution and high image fidelity. In addition, this framework is the first to support multilayer polarization-based compressive light field displays with time multiplexing. This new architecture significantly reduces artifacts compared with attenuation-based multilayer-multiframe displays; thus, it can allow the requirements regarding the number of layers or the refresh rate to be relaxed. We verify the proposed methods by constructing two 3-layer prototypes using high-speed LCDs, one based on the attenuation architecture and one based on the polarization architecture. Moreover, an efficient CUDA-based program is implemented. Our displays can produce images with higher spatial resolution with thinner form factors compared with traditional automultiscopic displays in both simulations and experiments.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a unified mathematical model for multilayer-multiframe compressive light field displays that supports both attenuation-based and polarization-based architectures. 
We show that the light field decomposition of such a display can be cast as a bound constrained nonlinear matrix optimization problem. Efficient light field decomposition algorithms are developed using the limited-memory BFGS (L-BFGS) method for automultiscopic displays with high resolution and high image fidelity. In addition, this framework is the first to support multilayer polarization-based compressive light field displays with time multiplexing. This new architecture significantly reduces artifacts compared with attenuation-based multilayer-multiframe displays; thus, it can allow the requirements regarding the number of layers or the refresh rate to be relaxed. We verify the proposed methods by constructing two 3-layer prototypes using high-speed LCDs, one based on the attenuation architecture and one based on the polarization architecture. Moreover, an efficient CUDA-based program is implemented. Our displays can produce images with higher spatial resolution with thinner form factors compared with traditional automultiscopic displays in both simulations and experiments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a unified mathematical model for multilayer-multiframe compressive light field displays that supports both attenuation-based and polarization-based architectures. We show that the light field decomposition of such a display can be cast as a bound constrained nonlinear matrix optimization problem. Efficient light field decomposition algorithms are developed using the limited-memory BFGS (L-BFGS) method for automultiscopic displays with high resolution and high image fidelity. In addition, this framework is the first to support multilayer polarization-based compressive light field displays with time multiplexing. 
This new architecture significantly reduces artifacts compared with attenuation-based multilayer-multiframe displays; thus, it can allow the requirements regarding the number of layers or the refresh rate to be relaxed. We verify the proposed methods by constructing two 3-layer prototypes using high-speed LCDs, one based on the attenuation architecture and one based on the polarization architecture. Moreover, an efficient CUDA-based program is implemented. Our displays can produce images with higher spatial resolution with thinner form factors compared with traditional automultiscopic displays in both simulations and experiments.", "title": "Unified Mathematical Model for Multilayer-Multiframe Compressive Light Field Displays Using LCDs", "normalizedTitle": "Unified Mathematical Model for Multilayer-Multiframe Compressive Light Field Displays Using LCDs", "fno": "08304611", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Architecture", "Multiplexing", "Optimization", "Liquid Crystal Displays", "Mathematical Model", "Nonhomogeneous Media", "Nonlinear Optics", "Compressive Light Field Display", "Multilayer Multiframe LCD Display", "Optimization Methods", "Polarization Based Display" ], "authors": [ { "givenName": "Jiahui", "surname": "Zhang", "fullName": "Jiahui Zhang", "affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Zhencheng", "surname": "Fan", "fullName": "Zhencheng Fan", "affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Dawei", "surname": "Sun", "fullName": "Dawei Sun", "affiliation": "Department of Automation, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hongen", "surname": "Liao", "fullName": "Hongen Liao", "affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China", "__typename": 
"ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1603-1614", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/asonam/2016/2846/0/07752422", "title": "Local community detection in multilayer networks", "doi": null, "abstractUrl": "/proceedings-article/asonam/2016/07752422/12OmNxuXcAX", "parentPublication": { "id": "proceedings/asonam/2016/2846/0", "title": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsc/2017/1600/0/1600a368", "title": "Tasks for Visual Analytics in Multilayer Networks", "doi": null, "abstractUrl": "/proceedings-article/dsc/2017/1600a368/12OmNz61dsf", "parentPublication": { "id": "proceedings/dsc/2017/1600/0", "title": "2017 IEEE Second International Conference on Data Science in Cyberspace (DSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tn/2018/03/08039503", "title": "Isomorphisms in Multilayer Networks", "doi": null, "abstractUrl": "/journal/tn/2018/03/08039503/13rRUIJuxqo", "parentPublication": { "id": "trans/tn", "title": "IEEE Transactions on Network Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/05/mcg2012050006", "title": "Compressive Light Field Displays", "doi": null, "abstractUrl": "/magazine/cg/2012/05/mcg2012050006/13rRUNvPLcn", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tn/2015/02/07093190", "title": "Spreading Processes in Multilayer Networks", 
"doi": null, "abstractUrl": "/journal/tn/2015/02/07093190/13rRUxDIti0", "parentPublication": { "id": "trans/tn", "title": "IEEE Transactions on Network Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007218", "title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007218/13rRUxcbnHi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08676153", "title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering", "doi": null, "abstractUrl": "/journal/tg/2019/05/08676153/18LFbQfp6x2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08676155", "title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask", "doi": null, "abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2021/3902/0/09671831", "title": "Truss Decomposition on Multilayer Graphs", "doi": null, "abstractUrl": "/proceedings-article/big-data/2021/09671831/1A8hpC7FG92", "parentPublication": { "id": "proceedings/big-data/2021/3902/0", "title": "2021 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800c133", "title": "Polarized Non-Line-of-Sight Imaging", 
"doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800c133/1m3oizGbFF6", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08307258", "articleId": "17D45XacGi4", "__typename": "AdjacentArticleType" }, "next": { "fno": "08283817", "articleId": "17D45XacGi3", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesNM", "name": "ttg201903-08304611s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201903-08304611s1.zip", "extension": "zip", "size": "40.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45XacGi3", "doi": "10.1109/TVCG.2018.2803829", "abstract": "In this design study, we present a visualization technique that segments patients’ histories instead of treating them as raw event sequences, aggregates the segments using criteria such as the whole history or treatment combinations, and then visualizes the aggregated segments as static dashboards that are arranged in a dashboard network to show longitudinal changes. The static dashboards were developed in nine iterations, to show 15 important attributes from the patients’ histories. The final design was evaluated with five non-experts, five visualization experts and four medical experts, who successfully used it to gain an overview of a 2,000 patient dataset, and to make observations about longitudinal changes and differences between two cohorts. The research represents a step-change in the detail of large-scale data that may be successfully visualized using dashboards, and provides guidance about how the approach may be generalized.", "abstracts": [ { "abstractType": "Regular", "content": "In this design study, we present a visualization technique that segments patients’ histories instead of treating them as raw event sequences, aggregates the segments using criteria such as the whole history or treatment combinations, and then visualizes the aggregated segments as static dashboards that are arranged in a dashboard network to show longitudinal changes. The static dashboards were developed in nine iterations, to show 15 important attributes from the patients’ histories. 
The final design was evaluated with five non-experts, five visualization experts and four medical experts, who successfully used it to gain an overview of a 2,000 patient dataset, and to make observations about longitudinal changes and differences between two cohorts. The research represents a step-change in the detail of large-scale data that may be successfully visualized using dashboards, and provides guidance about how the approach may be generalized.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this design study, we present a visualization technique that segments patients’ histories instead of treating them as raw event sequences, aggregates the segments using criteria such as the whole history or treatment combinations, and then visualizes the aggregated segments as static dashboards that are arranged in a dashboard network to show longitudinal changes. The static dashboards were developed in nine iterations, to show 15 important attributes from the patients’ histories. The final design was evaluated with five non-experts, five visualization experts and four medical experts, who successfully used it to gain an overview of a 2,000 patient dataset, and to make observations about longitudinal changes and differences between two cohorts. 
The research represents a step-change in the detail of large-scale data that may be successfully visualized using dashboards, and provides guidance about how the approach may be generalized.", "title": "Using Dashboard Networks to Visualize Multiple Patient Histories: A Design Study on Post-Operative Prostate Cancer", "normalizedTitle": "Using Dashboard Networks to Visualize Multiple Patient Histories: A Design Study on Post-Operative Prostate Cancer", "fno": "08283817", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "History", "Prostate Cancer", "Visualization", "Task Analysis", "Tools", "Medical Diagnostic Imaging", "Information Visualization", "Visual Analytics", "Multivariate Data Visualization", "Electronic Health Care Records", "Medical Data Analysis", "Prostate Cancer Disease", "Design Study", "User Study", "Evaluation", "Static Dashboard", "Dashboard Network" ], "authors": [ { "givenName": "Jürgen", "surname": "Bernard", "fullName": "Jürgen Bernard", "affiliation": "TU Darmstadt, Darmstadt, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "David", "surname": "Sessler", "fullName": "David Sessler", "affiliation": "TU Darmstadt, Darmstadt, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Jörn", "surname": "Kohlhammer", "fullName": "Jörn Kohlhammer", "affiliation": "TU Darmstadt, Darmstadt, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Roy A.", "surname": "Ruddle", "fullName": "Roy A. 
Ruddle", "affiliation": "University of Leeds, Leeds, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1615-1628", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2009/3733/0/3733a156", "title": "BrowseLine: 2D Timeline Visualization of Web Browsing Histories", "doi": null, "abstractUrl": "/proceedings-article/iv/2009/3733a156/12OmNCbU2VK", "parentPublication": { "id": "proceedings/iv/2009/3733/0", "title": "2009 13th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2004/2173/0/21730191", "title": "Quantitation of Extra-Capsular Prostate Tissue from Reconstructed Tissue Images", "doi": null, "abstractUrl": "/proceedings-article/bibe/2004/21730191/12OmNvHoQpz", "parentPublication": { "id": "proceedings/bibe/2004/2173/0", "title": "Fourth IEEE Symposium on Bioinformatics and Bioengineering (BIBE'04)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2016/0806/0/07550886", "title": "Partially developed coverability graphs for modeling test case execution histories", "doi": null, "abstractUrl": "/proceedings-article/icis/2016/07550886/12OmNyQYt44", "parentPublication": { "id": "proceedings/icis/2016/0806/0", "title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2018/5377/0/537701a380", "title": "Mapping the Treatment Journey for Patients with Prostate Cancer", "doi": null, "abstractUrl": "/proceedings-article/ichi/2018/537701a380/12OmNzSQdji", 
"parentPublication": { "id": "proceedings/ichi/2018/5377/0", "title": "2018 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itag/2015/7874/0/7874a071", "title": "Co-design of a Prostate Cancer Serious Game for African Caribbean Men", "doi": null, "abstractUrl": "/proceedings-article/itag/2015/7874a071/12OmNzYNN1h", "parentPublication": { "id": "proceedings/itag/2015/7874/0", "title": "2015 International Conference on Interactive Technologies and Games (iTAG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/03/mcg2015030044", "title": "A Visual-Interactive System for Prostate Cancer Cohort Analysis", "doi": null, "abstractUrl": "/magazine/cg/2015/03/mcg2015030044/13rRUwInv6U", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/lt/2017/01/07542151", "title": "Perceiving Learning at a Glance: A Systematic Literature Review of Learning Dashboard Research", "doi": null, "abstractUrl": "/journal/lt/2017/01/07542151/13rRUwdIOWV", "parentPublication": { "id": "trans/lt", "title": "IEEE Transactions on Learning Technologies", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539638", "title": "PROACT: Iterative Design of a Patient-Centered Visualization for Effective Prostate Cancer Health Risk Communication", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539638/13rRUxYINfk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10057994", "title": "Dashboard Design Mining and Recommendation", "doi": null, "abstractUrl": 
"/journal/tg/5555/01/10057994/1LbFmG2HHnW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2023/01/09656613", "title": "Finding Their Data Voice: Practices and Challenges of Dashboard Users", "doi": null, "abstractUrl": "/magazine/cg/2023/01/09656613/1zumu8nC20U", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08304611", "articleId": "17D45XeKgnu", "__typename": "AdjacentArticleType" }, "next": { "fno": "08320335", "articleId": "17D45WK5Arb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesN3", "name": "ttg201903-08283817s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201903-08283817s1.zip", "extension": "zip", "size": "396 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WK5Arb", "doi": "10.1109/TVCG.2018.2809751", "abstract": "This paper introduces a procedure for the calculation of the vertex positions in Marching-Cubes-like surface reconstruction methods, when the surface to reconstruct is characterised by a discrete indicator function. Linear or higher order methods for the vertex interpolation problem require a smooth input function. Therefore, the interpolation methodology to convert a discontinuous indicator function into a triangulated surface is non-trivial. Analytical formulations for this specific vertex interpolation problem have been derived for the 2D case by Manson et al. [Eurographics (2011) 30, 2] and the straightforward application of their method to a 3D case gives satisfactory visual results. A rigorous extension to 3D, however, requires a least-squares problem to be solved for the discrete values of a symmetric neighbourhood. It thus relies on an extra layer of information, and comes at a significantly higher cost. This paper proposes a novel vertex interpolation method which yields second-order-accurate reconstructed surfaces in the general 3D case, without altering the locality of the method. The associated errors are analysed and comparisons are made with linear vertex interpolation and the analytical formulations of Manson et al. [Eurographics (2011) 30, 2].", "abstracts": [ { "abstractType": "Regular", "content": "This paper introduces a procedure for the calculation of the vertex positions in Marching-Cubes-like surface reconstruction methods, when the surface to reconstruct is characterised by a discrete indicator function. 
Linear or higher order methods for the vertex interpolation problem require a smooth input function. Therefore, the interpolation methodology to convert a discontinuous indicator function into a triangulated surface is non-trivial. Analytical formulations for this specific vertex interpolation problem have been derived for the 2D case by Manson et al. [Eurographics (2011) 30, 2] and the straightforward application of their method to a 3D case gives satisfactory visual results. A rigorous extension to 3D, however, requires a least-squares problem to be solved for the discrete values of a symmetric neighbourhood. It thus relies on an extra layer of information, and comes at a significantly higher cost. This paper proposes a novel vertex interpolation method which yields second-order-accurate reconstructed surfaces in the general 3D case, without altering the locality of the method. The associated errors are analysed and comparisons are made with linear vertex interpolation and the analytical formulations of Manson et al. [Eurographics (2011) 30, 2].", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper introduces a procedure for the calculation of the vertex positions in Marching-Cubes-like surface reconstruction methods, when the surface to reconstruct is characterised by a discrete indicator function. Linear or higher order methods for the vertex interpolation problem require a smooth input function. Therefore, the interpolation methodology to convert a discontinuous indicator function into a triangulated surface is non-trivial. Analytical formulations for this specific vertex interpolation problem have been derived for the 2D case by Manson et al. [Eurographics (2011) 30, 2] and the straightforward application of their method to a 3D case gives satisfactory visual results. A rigorous extension to 3D, however, requires a least-squares problem to be solved for the discrete values of a symmetric neighbourhood. 
It thus relies on an extra layer of information, and comes at a significantly higher cost. This paper proposes a novel vertex interpolation method which yields second-order-accurate reconstructed surfaces in the general 3D case, without altering the locality of the method. The associated errors are analysed and comparisons are made with linear vertex interpolation and the analytical formulations of Manson et al. [Eurographics (2011) 30, 2].", "title": "Surface Reconstruction from Discrete Indicator Functions", "normalizedTitle": "Surface Reconstruction from Discrete Indicator Functions", "fno": "08320335", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Geometry", "Image Reconstruction", "Interpolation", "Least Squares Approximations", "Surface Reconstruction", "Discrete Indicator Function", "Smooth Input Function", "Interpolation Methodology", "Discontinuous Indicator Function", "Triangulated Surface", "Least Squares Problem", "Second Order Accurate Reconstructed Surfaces", "General 3 D Case", "Linear Vertex Interpolation", "Vertex Positions", "Marching Cubes Like Surface Reconstruction Methods", "Vertex Interpolation Method", "Vertex Interpolation Problem", "Surface Reconstruction", "Interpolation", "Surface Treatment", "Smoothing Methods", "Three Dimensional Displays", "Image Reconstruction", "Surface Reconstruction", "Volume Fractions", "Discrete Indicator Function", "Marching Cubes", "Vertex Interpolation" ], "authors": [ { "givenName": "Fabien", "surname": "Evrard", "fullName": "Fabien Evrard", "affiliation": "Department of Mechanical Engineering, Imperial College London, London, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Fabian", "surname": "Denner", "fullName": "Fabian Denner", "affiliation": "Department of Mechanical Engineering, Imperial College London, London, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Berend", "surname": "van Wachem", "fullName": "Berend van Wachem", "affiliation": 
"Department of Mechanical Engineering, Imperial College London, London, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1629-1635", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/1988/0862/0/00196227", "title": "Improving visible-surface reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1988/00196227/12OmNARAn8b", "parentPublication": { "id": "proceedings/cvpr/1988/0862/0", "title": "Proceedings CVPR '88: The Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2012/4789/0/4789a112", "title": "Surface Reconstruction by the Multiquadric Function", "doi": null, "abstractUrl": "/proceedings-article/iccis/2012/4789a112/12OmNC4wtxz", "parentPublication": { "id": "proceedings/iccis/2012/4789/0", "title": "2012 Fourth International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209b055", "title": "Surface Interpolation to Image with Edge Preserving", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209b055/12OmNqGRG7a", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2013/5051/0/5051a068", "title": "Growing Grid-Evolutionary Algorithm for Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2013/5051a068/12OmNwogh3R", "parentPublication": { "id": 
"proceedings/cgiv/2013/5051/0", "title": "2013 10th International Conference Computer Graphics, Imaging and Visualization (CGIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1989/1952/0/00037854", "title": "Discontinuity preserving surface reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1989/00037854/12OmNyPQ4Q6", "parentPublication": { "id": "proceedings/cvpr/1989/1952/0", "title": "1989 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1988/0878/0/00028452", "title": "A new surface interpolation technique for reconstructing 3-D objects from serial cross sections", "doi": null, "abstractUrl": "/proceedings-article/icpr/1988/00028452/12OmNznkJR3", "parentPublication": { "id": "proceedings/icpr/1988/0878/0", "title": "9th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-smartcity-dss/2017/2588/0/08291962", "title": "Integrated Quality Mesh Generation for Poisson Surface Reconstruction in HPC Applications", "doi": null, "abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2017/08291962/17D45VsBTYE", "parentPublication": { "id": "proceedings/hpcc-smartcity-dss/2017/2588/0", "title": "2017 IEEE 19th International Conference on High Performance Computing and Communications; IEEE 15th International Conference on Smart City; IEEE 3rd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600g292", "title": "POCO: Point Convolution for Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600g292/1H0KAZrauEo", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": 
"2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a417", "title": "Reconstruction of the CAD Model using TPS Surface", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a417/1cMFcnjZz2g", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800a967", "title": "SSRNet: Scalable 3D Surface Reconstruction Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800a967/1m3nKc80MlG", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08283817", "articleId": "17D45XacGi3", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1y2Fxh3IZDG", "doi": "10.1109/TVCG.2021.3110543", "abstract": "Welcome to the November 2021 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>. This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR). The conference took place in Bari, Italy from October 4&#x2013;8, 2021 in virtual mode due to the COVID-19 pandemic.", "abstracts": [ { "abstractType": "Regular", "content": "Welcome to the November 2021 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>. This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR). The conference took place in Bari, Italy from October 4&#x2013;8, 2021 in virtual mode due to the COVID-19 pandemic.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Welcome to the November 2021 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR). 
The conference took place in Bari, Italy from October 4–8, 2021 in virtual mode due to the COVID-19 pandemic.", "title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief", "normalizedTitle": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief", "fno": "09591457", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Klaus", "surname": "Mueller", "fullName": "Klaus Mueller", "affiliation": "Stony Brook University (State University of New York), USA", "__typename": "ArticleAuthorType" }, { "givenName": "Doug", "surname": "Bowman", "fullName": "Doug Bowman", "affiliation": "Virginia Tech, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4085-4085", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2012/12/ttg20121200ix", "title": "Message from the Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg20121200ix/13rRUwIF69i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06935055", "title": "Message from the Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tg/2014/12/06935055/13rRUwh80He", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08053887", "title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on 
Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08514109", "title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tg/2018/11/08514109/14M3E12c6Eo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09927195", "title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tg/2022/11/09927195/1HGJm87UJvq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08855103", "title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09254193", "title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09591492", "title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09663062", "title": "Message from the Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tg/2022/01/09663062/1zBaC3IZK9y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09663061", "title": "Preface", "doi": null, "abstractUrl": "/journal/tg/2022/01/09663061/1zBb8giCGEU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "09591492", "articleId": "1y2FvGMxBuM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1y2FvGMxBuM", "doi": "10.1109/TVCG.2021.3110544", "abstract": "In this special issue of <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>, we are pleased to present the journal papers from the 20th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2021), which will be held as a virtual conference between October 4 and 8, 2021. ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is the premier conference for Mixed and Augmented Reality in the world.", "abstracts": [ { "abstractType": "Regular", "content": "In this special issue of <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>, we are pleased to present the journal papers from the 20th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2021), which will be held as a virtual conference between October 4 and 8, 2021. ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is the premier conference for Mixed and Augmented Reality in the world.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the journal papers from the 20th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2021), which will be held as a virtual conference between October 4 and 8, 2021. 
ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is the premier conference for Mixed and Augmented Reality in the world.", "title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors", "normalizedTitle": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors", "fno": "09591492", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Special Issues And Sections", "Augmented Reality", "Meetings" ], "authors": [ { "givenName": "Daisuke", "surname": "Iwai", "fullName": "Daisuke Iwai", "affiliation": "Osaka University, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Guillaume", "surname": "Moreau", "fullName": "Guillaume Moreau", "affiliation": "IMT Atlantique, France", "__typename": "ArticleAuthorType" }, { "givenName": "Denis", "surname": "Kalkofen", "fullName": "Denis Kalkofen", "affiliation": "Graz University of Technology, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Tabitha", "surname": "Peck", "fullName": "Tabitha Peck", "affiliation": "Davidson College, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4086-4086", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2017/11/08053887", "title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08514064", "title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG 
Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2018/11/08514064/14M3DZSFbS8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699218", "title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699218/19F1TteG3QI", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09754286", "title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09927176", "title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08855105", "title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2020/12/09254194", "title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800z020", "title": "Message from the ISMAR 2020 Science and Technology Program Chairs and TVCG Guest Editors", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800z020/1pysy7gKfLO", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09405571", "title": "Introducing the IEEE Virtual Reality 2021 Special Issue", "doi": null, "abstractUrl": "/journal/tg/2021/05/09405571/1sP18PmVuQU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/06/09430173", "title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2021", "doi": null, "abstractUrl": "/journal/tg/2021/06/09430173/1tzuiF6azcs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09591457", "articleId": "1y2Fxh3IZDG", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523830", "articleId": "1wpqs1dtKes", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqs1dtKes", "doi": "10.1109/TVCG.2021.3106492", "abstract": "A common approach for Augmented Reality labeling is to display the label text on a flag planted into the real world element at a 3D anchor point. When there are more than just a few labels, the efficiency of the interface decreases as the user has to search for a given label sequentially. The search can be accelerated by sorting the labels alphabetically, but sorting all labels results in long and intersecting leader lines from the anchor points to the labels. This paper proposes a partially-sorted concentric label layout that leverages the search efficiency of sorting while avoiding the label display problems of long or intersecting leader lines. The labels are partitioned into a small number of sorted sequences displayed on circles of increasing radii. Since the labels on a circle are sorted, the user can quickly search each circle. A tight upper bound derived from circular permutation theory limits the number of circles and thereby the complexity of the label layout. For example, 12 labels require at most three circles. When the application allows it, the labels are presorted to further reduce the number of circles in the layout. The layout was tested in a user study where it significantly reduced the label searching time compared to a conventional single-circle layout.", "abstracts": [ { "abstractType": "Regular", "content": "A common approach for Augmented Reality labeling is to display the label text on a flag planted into the real world element at a 3D anchor point. 
When there are more than just a few labels, the efficiency of the interface decreases as the user has to search for a given label sequentially. The search can be accelerated by sorting the labels alphabetically, but sorting all labels results in long and intersecting leader lines from the anchor points to the labels. This paper proposes a partially-sorted concentric label layout that leverages the search efficiency of sorting while avoiding the label display problems of long or intersecting leader lines. The labels are partitioned into a small number of sorted sequences displayed on circles of increasing radii. Since the labels on a circle are sorted, the user can quickly search each circle. A tight upper bound derived from circular permutation theory limits the number of circles and thereby the complexity of the label layout. For example, 12 labels require at most three circles. When the application allows it, the labels are presorted to further reduce the number of circles in the layout. The layout was tested in a user study where it significantly reduced the label searching time compared to a conventional single-circle layout.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A common approach for Augmented Reality labeling is to display the label text on a flag planted into the real world element at a 3D anchor point. When there are more than just a few labels, the efficiency of the interface decreases as the user has to search for a given label sequentially. The search can be accelerated by sorting the labels alphabetically, but sorting all labels results in long and intersecting leader lines from the anchor points to the labels. This paper proposes a partially-sorted concentric label layout that leverages the search efficiency of sorting while avoiding the label display problems of long or intersecting leader lines. The labels are partitioned into a small number of sorted sequences displayed on circles of increasing radii. 
Since the labels on a circle are sorted, the user can quickly search each circle. A tight upper bound derived from circular permutation theory limits the number of circles and thereby the complexity of the label layout. For example, 12 labels require at most three circles. When the application allows it, the labels are presorted to further reduce the number of circles in the layout. The layout was tested in a user study where it significantly reduced the label searching time compared to a conventional single-circle layout.", "title": "A Partially-Sorted Concentric Layout for Efficient Label Localization in Augmented Reality", "normalizedTitle": "A Partially-Sorted Concentric Layout for Efficient Label Localization in Augmented Reality", "fno": "09523830", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Computational Complexity", "Data Visualisation", "Sorting", "Text Analysis", "Efficient Label Localization", "Augmented Reality Labeling", "Label Text", "Given Label", "Labels Results", "Partially Sorted Concentric Label Layout", "Label Display Problems", "Circle", "Partially Sorted Concentric Layout", "Layout", "Sorting", "Annotations", "Search Problems", "Labeling", "Gaze Tracking", "Augmented Reality", "Label Layout", "Fast Label Finding" ], "authors": [ { "givenName": "Zijing", "surname": "Zhou", "fullName": "Zijing Zhou", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lili", "surname": "Wang", "fullName": "Lili Wang", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Voicu", "surname": "Popescu", "fullName": "Voicu Popescu", "affiliation": "Purdue University, U.S.", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, 
"showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4087-4096", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2015/6879/0/07156379", "title": "Clutter-aware label layout", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156379/12OmNyY4rqE", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600o4136", "title": "Large Loss Matters in Weakly Supervised Multi-Label Classification", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600o4136/1H1lPFhwti0", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/09999508", "title": "Multi-View Partial Multi-Label Learning via Graph-Fusion-Based Label Enhancement", "doi": null, "abstractUrl": "/journal/tk/5555/01/09999508/1JrMyxvllsY", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a441", "title": "Label Guidance based Object Locating in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a441/1JrRbIVIzPG", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbd/2022/0971/0/097100a116", 
"title": "Label Enhancement with Sample Correlation via Sparse Representation", "doi": null, "abstractUrl": "/proceedings-article/cbd/2022/097100a116/1KdZhjYjh0Q", "parentPublication": { "id": "proceedings/cbd/2022/0971/0", "title": "2022 Tenth International Conference on Advanced Cloud and Big Data (CBD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0/199300b713", "title": "An Effective Approach for Multi-label Classification with Missing Labels", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2022/199300b713/1LSPGZyQHio", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0", "title": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/03/09171331", "title": "Incomplete Label Multiple Instance Multiple Label Learning", "doi": null, "abstractUrl": "/journal/tp/2022/03/09171331/1mq8fINsglW", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/07/09354590", "title": "Partial Multi-Label Learning With Noisy Label Identification", "doi": null, "abstractUrl": "/journal/tp/2022/07/09354590/1reXib2cwWk", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/02/09465741", "title": "Fast Label Enhancement for Label Distribution Learning", "doi": null, "abstractUrl": 
"/journal/tk/2023/02/09465741/1uIR9VwB7Xy", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900a933", "title": "Multi-Label Learning from Single Positive Labels", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900a933/1yeIVTYm12g", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09591492", "articleId": "1y2FvGMxBuM", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523888", "articleId": "1wpqxgia3Vm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2Frg4eSBy", "name": "ttg202111-09523830s1-supp1-3106492.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523830s1-supp1-3106492.mp4", "extension": "mp4", "size": "76 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqxgia3Vm", "doi": "10.1109/TVCG.2021.3106497", "abstract": "High dynamic range (HDR) panoramic environment maps are widely used to illuminate virtual objects to blend with real-world scenes. However, in common applications for augmented and mixed-reality (AR/MR), capturing 360&#x00B0; surroundings to obtain an HDR environment map is often not possible using consumer-level devices. We present a novel light estimation method to predict 360&#x00B0; HDR environment maps from a single photograph with a limited field-of-view (FOV). We introduce the Dynamic Lighting network (DLNet), a convolutional neural network that dynamically generates the convolution filters based on the input photograph sample to adaptively learn the lighting cues within each photograph. We propose novel Spherical Multi-Scale Dynamic (SMD) convolutional modules to dynamically generate sample-specific kernels for decoding features in the spherical domain to predict 360&#x00B0; environment maps. Using DLNet and data augmentations with respect to FOV, an exposure multiplier, and color temperature, our model shows the capability of estimating lighting under diverse input variations. Compared with prior work that fixes the network filters once trained, our method maintains lighting consistency across different exposure multipliers and color temperature, and maintains robust light estimation accuracy as FOV increases. 
The surrounding lighting information estimated by our method ensures coherent illumination of 3D objects blended with the input photograph, enabling high fidelity augmented and mixed reality supporting a wide range of environmental lighting conditions and device sensors.", "abstracts": [ { "abstractType": "Regular", "content": "High dynamic range (HDR) panoramic environment maps are widely used to illuminate virtual objects to blend with real-world scenes. However, in common applications for augmented and mixed-reality (AR/MR), capturing 360&#x00B0; surroundings to obtain an HDR environment map is often not possible using consumer-level devices. We present a novel light estimation method to predict 360&#x00B0; HDR environment maps from a single photograph with a limited field-of-view (FOV). We introduce the Dynamic Lighting network (DLNet), a convolutional neural network that dynamically generates the convolution filters based on the input photograph sample to adaptively learn the lighting cues within each photograph. We propose novel Spherical Multi-Scale Dynamic (SMD) convolutional modules to dynamically generate sample-specific kernels for decoding features in the spherical domain to predict 360&#x00B0; environment maps. Using DLNet and data augmentations with respect to FOV, an exposure multiplier, and color temperature, our model shows the capability of estimating lighting under diverse input variations. Compared with prior work that fixes the network filters once trained, our method maintains lighting consistency across different exposure multipliers and color temperature, and maintains robust light estimation accuracy as FOV increases. 
The surrounding lighting information estimated by our method ensures coherent illumination of 3D objects blended with the input photograph, enabling high fidelity augmented and mixed reality supporting a wide range of environmental lighting conditions and device sensors.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "High dynamic range (HDR) panoramic environment maps are widely used to illuminate virtual objects to blend with real-world scenes. However, in common applications for augmented and mixed-reality (AR/MR), capturing 360° surroundings to obtain an HDR environment map is often not possible using consumer-level devices. We present a novel light estimation method to predict 360° HDR environment maps from a single photograph with a limited field-of-view (FOV). We introduce the Dynamic Lighting network (DLNet), a convolutional neural network that dynamically generates the convolution filters based on the input photograph sample to adaptively learn the lighting cues within each photograph. We propose novel Spherical Multi-Scale Dynamic (SMD) convolutional modules to dynamically generate sample-specific kernels for decoding features in the spherical domain to predict 360° environment maps. Using DLNet and data augmentations with respect to FOV, an exposure multiplier, and color temperature, our model shows the capability of estimating lighting under diverse input variations. Compared with prior work that fixes the network filters once trained, our method maintains lighting consistency across different exposure multipliers and color temperature, and maintains robust light estimation accuracy as FOV increases. 
The surrounding lighting information estimated by our method ensures coherent illumination of 3D objects blended with the input photograph, enabling high fidelity augmented and mixed reality supporting a wide range of environmental lighting conditions and device sensors.", "title": "Adaptive Light Estimation using Dynamic Filtering for Diverse Lighting Conditions", "normalizedTitle": "Adaptive Light Estimation using Dynamic Filtering for Diverse Lighting Conditions", "fno": "09523888", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Convolutional Neural Nets", "Estimation Theory", "Filtering Theory", "Image Colour Analysis", "Lighting", "Rendering Computer Graphics", "Virtual Reality", "Data Augmentations", "FOV", "Color Temperature", "Diverse Input Variations", "Network Filters", "Lighting Consistency", "Mixed Reality", "Environmental Lighting Conditions", "Device Sensors", "Adaptive Light Estimation", "Dynamic Filtering", "Diverse Lighting Conditions", "High Dynamic Range Panoramic Environment Maps", "Mixed Reality", "HDR Environment Map", "Consumer Level Devices", "Light Estimation Method", "Single Photograph", "DL Net", "Convolutional Neural Network", "Input Photograph Sample", "Sample Specific Kernels", "Illuminate Virtual Objects", "AR MR", "Augmented Reality", "Spherical Multiscale Dynamic Convolutional Modules", "SMD", "Limited Field Of View", "Dynamic Lighting Network", "Lighting", "Feature Extraction", "Estimation", "Convolution", "Image Color Analysis", "Decoding", "Adaptation Models", "Augmented Reality", "Mixed Reality", "Lighting", "Light Estimation", "Deep Learning" ], "authors": [ { "givenName": "Junhong", "surname": "Zhao", "fullName": "Junhong Zhao", "affiliation": "Computational Media Innovation Centre (CMIC), Victoria University of Wellington, New Zealand", "__typename": "ArticleAuthorType" }, { "givenName": "Andrew", "surname": "Chalmers", "fullName": "Andrew Chalmers", "affiliation": "Computational Media Innovation Centre 
(CMIC), Victoria University of Wellington, New Zealand", "__typename": "ArticleAuthorType" }, { "givenName": "Taehyun", "surname": "Rhee", "fullName": "Taehyun Rhee", "affiliation": "Computational Media Innovation Centre (CMIC), Victoria University of Wellington, New Zealand", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4097-4106", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2012/1611/0/06239343", "title": "Recovering spectral reflectance under commonly available lighting conditions", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06239343/12OmNBTs7yq", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880281", "title": "Light Collages: Lighting Design for Effective Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880281/12OmNrJAe0V", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a199", "title": "Learning to Estimate Indoor Lighting from 3D Objects", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a199/17D45XacGj6", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300g920", "title": "Deep Sky Modeling for 
Single Image Outdoor Lighting Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300g920/1gyrdbEY2sM", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300k0150", "title": "All-Weather Deep Outdoor Lighting Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300k0150/1gyrg6Ricuc", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/10/09115833", "title": "Reconstructing Reflection Maps Using a Stacked-CNN for Mixed Reality Rendering", "doi": null, "abstractUrl": "/journal/tg/2021/10/09115833/1kBgVhAEmeA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150983", "title": "Deep Lighting Environment Map Estimation from Spherical Panoramas", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150983/1lPH5UYz7Da", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a355", "title": "High-Dynamic-Range Lighting Estimation From Face Portraits", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a355/1qyxlbQeCtO", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/iccea/2021/2616/0/261600a511", "title": "Analysis of Light Source Selection and Lighting Technology in Machine Vision", "doi": null, "abstractUrl": "/proceedings-article/iccea/2021/261600a511/1y4owJBt4li", "parentPublication": { "id": "proceedings/iccea/2021/2616/0", "title": "2021 International Conference on Computer Engineering and Application (ICCEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0586", "title": "Lighting, Reflectance and Geometry Estimation from 360&#x00B0; Panoramic Stereo", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0586/1yeIplXJ9wQ", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523830", "articleId": "1wpqs1dtKes", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523843", "articleId": "1wpqlQWCIxy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2FwBf0Tyo", "name": "ttg202111-09523888s1-supp1-3106497.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523888s1-supp1-3106497.pdf", "extension": "pdf", "size": "6.77 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqlQWCIxy", "doi": "10.1109/TVCG.2021.3106429", "abstract": "We present a CPU-based real-time cloth animation method for dressing virtual humans of various shapes and poses. Our approach formulates the clothing deformation as a high-dimensional function of body shape parameters and pose parameters. In order to accelerate the computation, our formulation factorizes the clothing deformation into two independent components: the deformation introduced by body pose variation (Clothing Pose Model) and the deformation from body shape variation (Clothing Shape Model). Furthermore, we sample and cluster the poses spanning the entire pose space and use those clusters to efficiently calculate the anchoring points. We also introduce a sensitivity-based distance measurement to both find nearby anchoring points and evaluate their contributions to the final animation. Given a query shape and pose of the virtual agent, we synthesize the resulting clothing deformation by blending the Taylor expansion results of nearby anchoring points. Compared to previous methods, our approach is general and able to add the shape dimension to any clothing pose model. Furthermore, we can animate clothing represented with tens of thousands of vertices at 50+ FPS on a CPU. We also conduct a user evaluation and show that our method can improve a user&#x0027;s perception of dressed virtual agents in an immersive virtual environment (IVE) compared to a realtime linear blend skinning method.", "abstracts": [ { "abstractType": "Regular", "content": "We present a CPU-based real-time cloth animation method for dressing virtual humans of various shapes and poses. 
Our approach formulates the clothing deformation as a high-dimensional function of body shape parameters and pose parameters. In order to accelerate the computation, our formulation factorizes the clothing deformation into two independent components: the deformation introduced by body pose variation (Clothing Pose Model) and the deformation from body shape variation (Clothing Shape Model). Furthermore, we sample and cluster the poses spanning the entire pose space and use those clusters to efficiently calculate the anchoring points. We also introduce a sensitivity-based distance measurement to both find nearby anchoring points and evaluate their contributions to the final animation. Given a query shape and pose of the virtual agent, we synthesize the resulting clothing deformation by blending the Taylor expansion results of nearby anchoring points. Compared to previous methods, our approach is general and able to add the shape dimension to any clothing pose model. Furthermore, we can animate clothing represented with tens of thousands of vertices at 50+ FPS on a CPU. We also conduct a user evaluation and show that our method can improve a user&#x0027;s perception of dressed virtual agents in an immersive virtual environment (IVE) compared to a realtime linear blend skinning method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a CPU-based real-time cloth animation method for dressing virtual humans of various shapes and poses. Our approach formulates the clothing deformation as a high-dimensional function of body shape parameters and pose parameters. In order to accelerate the computation, our formulation factorizes the clothing deformation into two independent components: the deformation introduced by body pose variation (Clothing Pose Model) and the deformation from body shape variation (Clothing Shape Model). 
Furthermore, we sample and cluster the poses spanning the entire pose space and use those clusters to efficiently calculate the anchoring points. We also introduce a sensitivity-based distance measurement to both find nearby anchoring points and evaluate their contributions to the final animation. Given a query shape and pose of the virtual agent, we synthesize the resulting clothing deformation by blending the Taylor expansion results of nearby anchoring points. Compared to previous methods, our approach is general and able to add the shape dimension to any clothing pose model. Furthermore, we can animate clothing represented with tens of thousands of vertices at 50+ FPS on a CPU. We also conduct a user evaluation and show that our method can improve a user's perception of dressed virtual agents in an immersive virtual environment (IVE) compared to a realtime linear blend skinning method.", "title": "AgentDress: Realtime Clothing Synthesis for Virtual Agents using Plausible Deformations", "normalizedTitle": "AgentDress: Realtime Clothing Synthesis for Virtual Agents using Plausible Deformations", "fno": "09523843", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Clothing", "Computer Animation", "Pose Estimation", "Solid Modelling", "Virtual Reality", "Realtime Clothing Synthesis", "Virtual Agent", "Plausible Deformations", "CPU Based Real Time Cloth Animation Method", "Virtual Humans", "High Dimensional Function", "Body Shape Parameters", "Clothing Pose Model", "Body Shape Variation", "Clothing Shape Model", "Sensitivity Based Distance Measurement", "Nearby Anchoring Points", "Query Shape", "Resulting Clothing Deformation", "Shape Dimension", "Dressed Virtual Agents", "Immersive Virtual Environment", "Realtime Linear Blend Skinning Method", "Clothing", "Strain", "Shape", "Animation", "Real Time Systems", "Deformable Models", "Computational Modeling", "Clothing Animation", "Virtual Agents", "Social VR", "Virtual Try On Clothing Shape Models" ], "authors": [ { 
"givenName": "Nannan", "surname": "Wu", "fullName": "Nannan Wu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Qianwen", "surname": "Chao", "fullName": "Qianwen Chao", "affiliation": "Department of Computer Science, Xidian University, Xi'an, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yanzhen", "surname": "Chen", "fullName": "Yanzhen Chen", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Weiwei", "surname": "Xu", "fullName": "Weiwei Xu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Chen", "surname": "Liu", "fullName": "Chen Liu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Dinesh", "surname": "Manocha", "fullName": "Dinesh Manocha", "affiliation": "Department of Computer Science, University of Maryland, College Park, MD, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Wenxin", "surname": "Sun", "fullName": "Wenxin Sun", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yi", "surname": "Han", "fullName": "Yi Han", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xinran", "surname": "Yao", "fullName": "Xinran Yao", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaogang", "surname": "Jin", "fullName": "Xiaogang Jin", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", 
"pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4107-4118", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457f484", "title": "Detailed, Accurate, Human Shape Estimation from Clothed 3D Scan Sequences", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457f484/12OmNCbCrYI", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2014/7000/1/7000a171", "title": "A Layered Model of Human Body and Garment Deformation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a171/12OmNCf1DwE", "parentPublication": { "id": "3dv/2014/7000/1", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032a853", "title": "A Generative Model of People in Clothing", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032a853/12OmNCwlakU", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545658", "title": "Representing a Partially Observed Non-Rigid 3D Human Using Eigen-Texture and Eigen-Deformation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545658/17D45XuDNE6", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1688", "title": "Neural-GIF: Neural Generalized 
Implicit Functions for Animating People in Clothing", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1688/1BmLeBojAWs", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200k0954", "title": "The Power of Points for Modeling Humans in Clothing", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200k0954/1BmLrmWbNuM", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2022/5670/0/567000a679", "title": "Neural Point-based Shape Modeling of Humans in Challenging Clothing", "doi": null, "abstractUrl": "/proceedings-article/3dv/2022/567000a679/1KYsvi8qLS0", "parentPublication": { "id": "proceedings/3dv/2022/5670/0", "title": "2022 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h363", "title": "TailorNet: Predicting Clothing in 3D as a Function of Human Pose, Shape and Garment Style", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h363/1m3nnD97pZu", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800g468", "title": "Learning to Dress 3D People in Generative Clothing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800g468/1m3nwUHFD68", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition 
(CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a322", "title": "MonoClothCap: Towards Temporally Coherent Clothing Capture from Monocular RGB Video", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a322/1qyxk1bcV5S", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523888", "articleId": "1wpqxgia3Vm", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523841", "articleId": "1wpqtpOgOqI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2FnK0CcmI", "name": "ttg202111-09523843s1-supp1-3106429.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523843s1-supp1-3106429.pdf", "extension": "pdf", "size": "5.03 MB", "__typename": "WebExtraType" }, { "id": "1y2FnE32t0I", "name": "ttg202111-09523843s1-supp3-3106429.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523843s1-supp3-3106429.mp4", "extension": "mp4", "size": "6.22 MB", "__typename": "WebExtraType" }, { "id": "1y2FnplS5tC", "name": "ttg202111-09523843s1-supp2-3106429.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523843s1-supp2-3106429.mp4", "extension": "mp4", "size": "31 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqtpOgOqI", "doi": "10.1109/TVCG.2021.3106479", "abstract": "Civil engineering is a primary domain for new augmented reality technologies. In this work, the area of subsurface utility engineering is revisited, and new methods tackling well-known, yet unsolved problems are presented. We describe our solution to the outdoor localization problem, which is deemed one of the most critical issues in outdoor augmented reality, proposing a novel, lightweight hardware platform to generate highly accurate position and orientation estimates in a global context. Furthermore, we present new approaches to drastically improve realism of outdoor data visualizations. First, a novel method to replace physical spray markings by indistinguishable virtual counterparts is described. Second, the visualization of 3D reconstructions of real excavations is presented, fusing seamlessly with the view onto the real environment. We demonstrate the power of these new methods on a set of different outdoor scenarios.", "abstracts": [ { "abstractType": "Regular", "content": "Civil engineering is a primary domain for new augmented reality technologies. In this work, the area of subsurface utility engineering is revisited, and new methods tackling well-known, yet unsolved problems are presented. We describe our solution to the outdoor localization problem, which is deemed one of the most critical issues in outdoor augmented reality, proposing a novel, lightweight hardware platform to generate highly accurate position and orientation estimates in a global context. Furthermore, we present new approaches to drastically improve realism of outdoor data visualizations. 
First, a novel method to replace physical spray markings by indistinguishable virtual counterparts is described. Second, the visualization of 3D reconstructions of real excavations is presented, fusing seamlessly with the view onto the real environment. We demonstrate the power of these new methods on a set of different outdoor scenarios.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Civil engineering is a primary domain for new augmented reality technologies. In this work, the area of subsurface utility engineering is revisited, and new methods tackling well-known, yet unsolved problems are presented. We describe our solution to the outdoor localization problem, which is deemed one of the most critical issues in outdoor augmented reality, proposing a novel, lightweight hardware platform to generate highly accurate position and orientation estimates in a global context. Furthermore, we present new approaches to drastically improve realism of outdoor data visualizations. First, a novel method to replace physical spray markings by indistinguishable virtual counterparts is described. Second, the visualization of 3D reconstructions of real excavations is presented, fusing seamlessly with the view onto the real environment. 
We demonstrate the power of these new methods on a set of different outdoor scenarios.", "title": "Augmented Reality for Subsurface Utility Engineering, Revisited", "normalizedTitle": "Augmented Reality for Subsurface Utility Engineering, Revisited", "fno": "09523841", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Civil Engineering Computing", "Data Visualisation", "Subsurface Utility Engineering", "Civil Engineering", "Primary Domain", "Augmented Reality Technologies", "Outdoor Localization Problem", "Outdoor Augmented Reality", "Lightweight Hardware Platform", "Highly Accurate Position", "Orientation Estimates", "Outdoor Data Visualizations", "3 D Reconstructions", "Data Visualization", "Three Dimensional Displays", "Location Awareness", "Excavation", "Global Positioning System", "Daylighting", "Surface Reconstruction", "Augmented Reality", "Infrastructure", "Computer Graphics", "Localization" ], "authors": [ { "givenName": "Lasse H.", "surname": "Hansen", "fullName": "Lasse H. 
Hansen", "affiliation": "Aalborg University, Denmark", "__typename": "ArticleAuthorType" }, { "givenName": "Philipp", "surname": "Fleck", "fullName": "Philipp Fleck", "affiliation": "Graz University of Technology, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Marco", "surname": "Stranner", "fullName": "Marco Stranner", "affiliation": "Graz University of Technology, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Dieter", "surname": "Schmalstieg", "fullName": "Dieter Schmalstieg", "affiliation": "Graz University of Technology, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Clemens", "surname": "Arth", "fullName": "Clemens Arth", "affiliation": "AR4 GmbH, Graz, Austria", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4119-4128", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2013/2869/0/06671820", "title": "Improved outdoor augmented reality through “Globalization”", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671820/12OmNrF2DO7", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscc/2012/2712/0/IS225", "title": "Vision-based user tracking for outdoor augmented reality", "doi": null, "abstractUrl": "/proceedings-article/iscc/2012/IS225/12OmNrH1PAZ", "parentPublication": { "id": "proceedings/iscc/2012/2712/0", "title": "2012 IEEE Symposium on Computers and Communications (ISCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402588", "title": 
"Digital map based pose improvement for outdoor Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402588/12OmNrIrPqF", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/grapp/2014/8078/0/07296087", "title": "Symbol adaptation assessment in outdoor augmented reality", "doi": null, "abstractUrl": "/proceedings-article/grapp/2014/07296087/12OmNrYCY3i", "parentPublication": { "id": "proceedings/grapp/2014/8078/0", "title": "2014 International Conference on Computer Graphics Theory and Applications (GRAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscid/2008/3311/2/3311b069", "title": "Development Actuality and Application of Registration Technology in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/iscid/2008/3311b069/12OmNxFsmHr", "parentPublication": { "id": "proceedings/iscid/2008/3311/2", "title": "2008 International Symposium on Computational Intelligence and Design", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2000/0795/0/07950139", "title": "ARQuake: An Outdoor/Indoor Augmented Reality First Person Application", "doi": null, "abstractUrl": "/proceedings-article/iswc/2000/07950139/12OmNxdDFAr", "parentPublication": { "id": "proceedings/iswc/2000/0795/0", "title": "Digest of Papers. 
Fourth International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/euc/2014/5249/0/5249a287", "title": "Mobile Augmented Reality System for Marine Navigation Assistance", "doi": null, "abstractUrl": "/proceedings-article/euc/2014/5249a287/12OmNxxdZFo", "parentPublication": { "id": "proceedings/euc/2014/5249/0", "title": "2014 12th IEEE International Conference on Embedded and Ubiquitous Computing (EUC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2013/2246/0/2246a132", "title": "Estimation of Environmental Lighting from Known Geometries for Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/cw/2013/2246a132/12OmNy5R3C7", "parentPublication": { "id": "proceedings/cw/2013/2246/0", "title": "2013 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2010/4055/0/4055a450", "title": "Multi-Object Oriented Augmented Reality for Location-Based Adaptive Mobile Learning", "doi": null, "abstractUrl": "/proceedings-article/icalt/2010/4055a450/12OmNyUWR9U", "parentPublication": { "id": "proceedings/icalt/2010/4055/0", "title": "Advanced Learning Technologies, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2002/05/mcg2002050052", "title": "Archeoguide: An Augmented Reality Guide for Archaeological Sites", "doi": null, "abstractUrl": "/magazine/cg/2002/05/mcg2002050052/13rRUxBJhoQ", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523843", "articleId": "1wpqlQWCIxy", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523831", "articleId": "1wpqru2GjIY", 
"__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqru2GjIY", "doi": "10.1109/TVCG.2021.3106480", "abstract": "A 3D Telepresence system allows users to interact with each other in a virtual, mixed, or augmented reality (VR, MR, AR) environment, creating a shared space for collaboration and communication. There are two main methods for representing users within these 3D environments. Users can be represented either as point cloud reconstruction-based avatars that resemble a physical user or as virtual character-based avatars controlled by tracking the users&#x0027; body motion. This work compares both techniques to identify the differences between user representations and their fit in the reconstructed environments regarding the perceived presence, uncanny valley factors, and behavior impression. Our study uses an asymmetric VR/AR teleconsultation system that allows a remote user to join a local scene using VR. The local user observes the remote user with an AR head-mounted display, leading to facial occlusions in the 3D reconstruction. Participants perform a warm-up interaction task followed by a goal-directed collaborative puzzle task, pursuing a common goal. The local user was represented either as a point cloud reconstruction or as a virtual character-based avatar, in which case the point cloud reconstruction of the local user was masked. Our results show that the point cloud reconstruction-based avatar was superior to the virtual character avatar regarding perceived co-presence, social presence, behavioral impression, and humanness. Further, we found that the task type partly affected the perception. 
The point cloud reconstruction-based approach led to higher usability ratings, while objective performance measures showed no significant difference. We conclude that despite partly missing facial information, the point cloud-based reconstruction resulted in better conveyance of the user behavior and a more coherent fit into the simulation context.", "abstracts": [ { "abstractType": "Regular", "content": "A 3D Telepresence system allows users to interact with each other in a virtual, mixed, or augmented reality (VR, MR, AR) environment, creating a shared space for collaboration and communication. There are two main methods for representing users within these 3D environments. Users can be represented either as point cloud reconstruction-based avatars that resemble a physical user or as virtual character-based avatars controlled by tracking the users&#x0027; body motion. This work compares both techniques to identify the differences between user representations and their fit in the reconstructed environments regarding the perceived presence, uncanny valley factors, and behavior impression. Our study uses an asymmetric VR/AR teleconsultation system that allows a remote user to join a local scene using VR. The local user observes the remote user with an AR head-mounted display, leading to facial occlusions in the 3D reconstruction. Participants perform a warm-up interaction task followed by a goal-directed collaborative puzzle task, pursuing a common goal. The local user was represented either as a point cloud reconstruction or as a virtual character-based avatar, in which case the point cloud reconstruction of the local user was masked. Our results show that the point cloud reconstruction-based avatar was superior to the virtual character avatar regarding perceived co-presence, social presence, behavioral impression, and humanness. Further, we found that the task type partly affected the perception. 
The point cloud reconstruction-based approach led to higher usability ratings, while objective performance measures showed no significant difference. We conclude that despite partly missing facial information, the point cloud-based reconstruction resulted in better conveyance of the user behavior and a more coherent fit into the simulation context.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A 3D Telepresence system allows users to interact with each other in a virtual, mixed, or augmented reality (VR, MR, AR) environment, creating a shared space for collaboration and communication. There are two main methods for representing users within these 3D environments. Users can be represented either as point cloud reconstruction-based avatars that resemble a physical user or as virtual character-based avatars controlled by tracking the users' body motion. This work compares both techniques to identify the differences between user representations and their fit in the reconstructed environments regarding the perceived presence, uncanny valley factors, and behavior impression. Our study uses an asymmetric VR/AR teleconsultation system that allows a remote user to join a local scene using VR. The local user observes the remote user with an AR head-mounted display, leading to facial occlusions in the 3D reconstruction. Participants perform a warm-up interaction task followed by a goal-directed collaborative puzzle task, pursuing a common goal. The local user was represented either as a point cloud reconstruction or as a virtual character-based avatar, in which case the point cloud reconstruction of the local user was masked. Our results show that the point cloud reconstruction-based avatar was superior to the virtual character avatar regarding perceived co-presence, social presence, behavioral impression, and humanness. Further, we found that the task type partly affected the perception. 
The point cloud reconstruction-based approach led to higher usability ratings, while objective performance measures showed no significant difference. We conclude that despite partly missing facial information, the point cloud-based reconstruction resulted in better conveyance of the user behavior and a more coherent fit into the simulation context.", "title": "Avatars for Teleconsultation: Effects of Avatar Embodiment Techniques on User Perception in 3D Asymmetric Telepresence", "normalizedTitle": "Avatars for Teleconsultation: Effects of Avatar Embodiment Techniques on User Perception in 3D Asymmetric Telepresence", "fno": "09523831", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Avatars", "Face Recognition", "Helmet Mounted Displays", "Image Reconstruction", "Stereo Image Processing", "Telecontrol", "Point Cloud Based Reconstruction", "User Behavior", "Avatar Embodiment Techniques", "User Perception", "Virtual Reality", "Mixed Reality", "Augmented Reality", "Point Cloud Reconstruction Based Avatar", "Physical User", "Virtual Character Based Avatar", "User Representations", "Reconstructed Environments", "Behavior Impression", "Remote User", "Local User", "Goal Directed Collaborative Puzzle Task", "Virtual Character Avatar", "Behavioral Impression", "3 D Telepresence System", "3 D Asymmetric Telepresence", "Teleconsultation Avatars", "3 D Environments", "Users Body Motion", "Perceived Presence", "Uncanny Valley Factors", "Asymmetric VR AR Teleconsultation System", "AR Head Mounted Display", "Facial Occlusions", "3 D Reconstruction", "Warm Up Interaction Task", "Perceived Co Presence", "Social Presence", "Usability Ratings", "Partly Missing Facial Information", "Avatars", "Three Dimensional Displays", "Telepresence", "Task Analysis", "Collaboration", "Real Time Systems", "Faces", "Telepresence", "Avatars", "Augmented Reality", "Mixed Reality", "Virtual Reality", "Collaboration", "Embodiment" ], "authors": [ { "givenName": "Kevin", "surname": 
"Yu", "fullName": "Kevin Yu", "affiliation": "Research group MITI, Technical University of Munich, Munchen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Gleb", "surname": "Gorbachev", "fullName": "Gleb Gorbachev", "affiliation": "Computer Aided Medical Procedures, Technical University of Munich, Munchen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Ulrich", "surname": "Eck", "fullName": "Ulrich Eck", "affiliation": "Computer Aided Medical Procedures, Technical University of Munich, Munchen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Frieder", "surname": "Pankratz", "fullName": "Frieder Pankratz", "affiliation": "Institute for Emergency Medicine, Ludwig Maximilian University, Munchen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Nassir", "surname": "Navab", "fullName": "Nassir Navab", "affiliation": "Chair of Computer Aided Medical Procedures, Technical University of Munich, Munchen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Roth", "fullName": "Daniel Roth", "affiliation": "Human-Centered Computing and Extended Reality, Friedrich-Alexander University (FAU), Erlangen-Nuremberg, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4129-4139", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2013/4795/0/06549424", "title": "Rapid generation of personalized avatars", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549424/12OmNyQGShm", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09873991", "title": 
"Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces", "doi": null, "abstractUrl": "/journal/tg/2022/11/09873991/1GjwGcGrRmg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a072", "title": "Volumetric Avatar Reconstruction with Spatio-Temporally Offset RGBD Cameras", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a072/1MNgmRWwNUI", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797819", "title": "Localizing Teleoperator Gaze in 360&#x00B0; Hosted Telepresence", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797819/1cJ1d3MdShi", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797719", "title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798152", "title": "The Influence of Size in Augmented Reality Telepresence Avatars", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality 
and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998305", "title": "Avatar and Sense of Embodiment: Studying the Relative Preference Between Appearance, Control and Point of View", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998305/1hpPBuW1ahy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089654", "title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/03/09173828", "title": "Placement Retargeting of Virtual Avatars to Dissimilar Indoor Environments", "doi": null, "abstractUrl": "/journal/tg/2022/03/09173828/1mtsbpUceNG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/07/09257094", "title": "Output-Sensitive Avatar Representations for Immersive Telepresence", "doi": null, "abstractUrl": "/journal/tg/2022/07/09257094/1oFCABrJUmA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523841", "articleId": "1wpqtpOgOqI", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523889", "articleId": "1wpqwAIMiRy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": 
"1y2FqFx5SCI", "name": "ttg202111-09523831s1-supp1-3106480.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523831s1-supp1-3106480.mp4", "extension": "mp4", "size": "25.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqwAIMiRy", "doi": "10.1109/TVCG.2021.3106494", "abstract": "Accurately modelling user behaviour has the potential to significantly improve the quality of human-computer interaction. Traditionally, these models are carefully hand-crafted to approximate specific aspects of well-documented user behaviour. This limits their availability in virtual and augmented reality where user behaviour is often not yet well understood. Recent efforts have demonstrated that reinforcement learning can approximate human behaviour during simple goal-oriented reaching tasks. We build on these efforts and demonstrate that reinforcement learning can also approximate user behaviour in a complex mid-air interaction task: typing on a virtual keyboard. We present the first reinforcement learning-based user model for mid-air and surface-aligned typing on a virtual keyboard. Our model is shown to replicate high-level human typing behaviour. We demonstrate that this approach may be used to augment or replace human testing during the validation and development of virtual keyboards.", "abstracts": [ { "abstractType": "Regular", "content": "Accurately modelling user behaviour has the potential to significantly improve the quality of human-computer interaction. Traditionally, these models are carefully hand-crafted to approximate specific aspects of well-documented user behaviour. This limits their availability in virtual and augmented reality where user behaviour is often not yet well understood. Recent efforts have demonstrated that reinforcement learning can approximate human behaviour during simple goal-oriented reaching tasks. 
We build on these efforts and demonstrate that reinforcement learning can also approximate user behaviour in a complex mid-air interaction task: typing on a virtual keyboard. We present the first reinforcement learning-based user model for mid-air and surface-aligned typing on a virtual keyboard. Our model is shown to replicate high-level human typing behaviour. We demonstrate that this approach may be used to augment or replace human testing during the validation and development of virtual keyboards.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Accurately modelling user behaviour has the potential to significantly improve the quality of human-computer interaction. Traditionally, these models are carefully hand-crafted to approximate specific aspects of well-documented user behaviour. This limits their availability in virtual and augmented reality where user behaviour is often not yet well understood. Recent efforts have demonstrated that reinforcement learning can approximate human behaviour during simple goal-oriented reaching tasks. We build on these efforts and demonstrate that reinforcement learning can also approximate user behaviour in a complex mid-air interaction task: typing on a virtual keyboard. We present the first reinforcement learning-based user model for mid-air and surface-aligned typing on a virtual keyboard. Our model is shown to replicate high-level human typing behaviour. 
We demonstrate that this approach may be used to augment or replace human testing during the validation and development of virtual keyboards.", "title": "Complex Interaction as Emergent Behaviour: Simulating Mid-Air Virtual Keyboard Typing using Reinforcement Learning", "normalizedTitle": "Complex Interaction as Emergent Behaviour: Simulating Mid-Air Virtual Keyboard Typing using Reinforcement Learning", "fno": "09523889", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Learning Artificial Intelligence", "User Modelling", "Reinforcement Learning Based User Model", "High Level Human Typing Behaviour", "Human Testing", "Complex Interaction", "Emergent Behaviour", "Mid Air Virtual Keyboard Typing", "Accurately Modelling User Behaviour", "Human Computer Interaction", "Approximate Specific Aspects", "Well Documented User Behaviour", "Virtual Reality", "Augmented Reality", "Simple Goal Oriented", "Mid Air Interaction Task", "Keyboards", "Biological System Modeling", "Task Analysis", "Reinforcement Learning", "Computational Modeling", "Solid Modeling", "Biomechanics", "Reinforcement Learning", "Virtual Reality", "User Model" ], "authors": [ { "givenName": "Lorenz", "surname": "Hetzel", "fullName": "Lorenz Hetzel", "affiliation": "University of Cambridge, USA", "__typename": "ArticleAuthorType" }, { "givenName": "John", "surname": "Dudley", "fullName": "John Dudley", "affiliation": "University of Cambridge, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Anna Maria", "surname": "Feit", "fullName": "Anna Maria Feit", "affiliation": "Saarland University, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Per Ola", "surname": "Kristensson", "fullName": "Per Ola Kristensson", "affiliation": "University of Cambridge, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", 
"pages": "4140-4149", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpads/2014/7615/0/07097812", "title": "Virtual keyboard for head mounted display-based wearable devices", "doi": null, "abstractUrl": "/proceedings-article/icpads/2014/07097812/12OmNqzu6VX", "parentPublication": { "id": "proceedings/icpads/2014/7615/0", "title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2017/1710/0/1710a787", "title": "Analyzing the Impact of Cognitive Load in Evaluating Gaze-Based Typing", "doi": null, "abstractUrl": "/proceedings-article/cbms/2017/1710a787/12OmNx1IwaL", "parentPublication": { "id": "proceedings/cbms/2017/1710/0", "title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2017/0563/0/08273592", "title": "Evaluating effectiveness of smartphone typing as an indicator of user emotion", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273592/12OmNz61d2O", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a694", "title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a676", "title": "AiRType: An Air-tapping Keyboard for Augmented Reality Environments", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a676/1CJfr9wrq1i", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a008", "title": "Exploring the Impact of Visual Information on Intermittent Typing in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a008/1JrR2KZbVXq", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798238", "title": "Text Typing in VR Using Smartphones Touchscreen and HMD", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798238/1cJ0Qw94bi8", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2020/9325/0/09232570", "title": "Touchless Typing Using Head Movement-based Gestures", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2020/09232570/1o56Avh0Bhu", "parentPublication": { "id": "proceedings/bigmm/2020/9325/0", "title": "2020 IEEE Sixth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a393", "title": "Simulating Realistic Human Motion Trajectories of Mid-Air Gesture Typing", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2021/015800a393/1yeCVRK9bri", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a510", "title": "MusiKeys: Investigating Auditory-Physical Feedback Replacement Technique for Mid-air Typing", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a510/1yeQWHyOQes", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523831", "articleId": "1wpqru2GjIY", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523834", "articleId": "1wpqnjOfx60", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2FwaNLpnO", "name": "ttg202111-09523889s1-supp1-3106494.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523889s1-supp1-3106494.mp4", "extension": "mp4", "size": "25.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqnjOfx60", "doi": "10.1109/TVCG.2021.3106478", "abstract": "In recent years, medical simulators have been widely applied to a broad range of surgery training tasks. However, most of the existing surgery simulators can only provide limited immersive environments with a few pre-processed organ models, while ignoring the instant modeling of various personalized clinical cases, which brings substantive differences between training experiences and real surgery situations. To this end, we present a virtual reality (VR) based surgery simulation system for personalized percutaneous coronary intervention (PCI). The simulation system can directly take patient-specific clinical data as input and generate virtual 3D intervention scenarios. Specially, we introduce a fiber-based patient-specific cardiac dynamic model to simulate the nonlinear deformation among the multiple layers of the cardiac structure, which can well respect and correlate the atriums, ventricles and vessels, and thus gives rise to more effective visualization and interaction. Meanwhile, we design a tracking and haptic feedback hardware, which can enable users to manipulate physical intervention instruments and interact with virtual scenarios. We conduct quantitative analysis on deformation precision and modeling efficiency, and evaluate the simulation system based on the user studies from 16 cardiologists and 20 intervention trainees, comparing it to traditional desktop intervention simulators. 
The results confirm that our simulation system can provide a better user experience, and is a suitable platform for PCI surgery training and rehearsal.", "abstracts": [ { "abstractType": "Regular", "content": "In recent years, medical simulators have been widely applied to a broad range of surgery training tasks. However, most of the existing surgery simulators can only provide limited immersive environments with a few pre-processed organ models, while ignoring the instant modeling of various personalized clinical cases, which brings substantive differences between training experiences and real surgery situations. To this end, we present a virtual reality (VR) based surgery simulation system for personalized percutaneous coronary intervention (PCI). The simulation system can directly take patient-specific clinical data as input and generate virtual 3D intervention scenarios. Specially, we introduce a fiber-based patient-specific cardiac dynamic model to simulate the nonlinear deformation among the multiple layers of the cardiac structure, which can well respect and correlate the atriums, ventricles and vessels, and thus gives rise to more effective visualization and interaction. Meanwhile, we design a tracking and haptic feedback hardware, which can enable users to manipulate physical intervention instruments and interact with virtual scenarios. We conduct quantitative analysis on deformation precision and modeling efficiency, and evaluate the simulation system based on the user studies from 16 cardiologists and 20 intervention trainees, comparing it to traditional desktop intervention simulators. The results confirm that our simulation system can provide a better user experience, and is a suitable platform for PCI surgery training and rehearsal.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In recent years, medical simulators have been widely applied to a broad range of surgery training tasks. 
However, most of the existing surgery simulators can only provide limited immersive environments with a few pre-processed organ models, while ignoring the instant modeling of various personalized clinical cases, which brings substantive differences between training experiences and real surgery situations. To this end, we present a virtual reality (VR) based surgery simulation system for personalized percutaneous coronary intervention (PCI). The simulation system can directly take patient-specific clinical data as input and generate virtual 3D intervention scenarios. Specially, we introduce a fiber-based patient-specific cardiac dynamic model to simulate the nonlinear deformation among the multiple layers of the cardiac structure, which can well respect and correlate the atriums, ventricles and vessels, and thus gives rise to more effective visualization and interaction. Meanwhile, we design a tracking and haptic feedback hardware, which can enable users to manipulate physical intervention instruments and interact with virtual scenarios. We conduct quantitative analysis on deformation precision and modeling efficiency, and evaluate the simulation system based on the user studies from 16 cardiologists and 20 intervention trainees, comparing it to traditional desktop intervention simulators. 
The results confirm that our simulation system can provide a better user experience, and is a suitable platform for PCI surgery training and rehearsal.", "title": "Design and Evaluation of Personalized Percutaneous Coronary Intervention Surgery Simulation System", "normalizedTitle": "Design and Evaluation of Personalized Percutaneous Coronary Intervention Surgery Simulation System", "fno": "09523834", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Blood Vessels", "Cardiology", "Computer Based Training", "Haptic Interfaces", "Medical Computing", "Medical Image Processing", "Physiological Models", "Surgery", "Virtual Reality", "Surgery Training Tasks", "Pre Processed Organ Models", "Personalized Clinical Cases", "Training Experiences", "Surgery Situations", "Virtual Reality Based Surgery Simulation System", "Patient Specific Clinical Data", "Virtual 3 D Intervention Scenarios", "Fiber Based Patient Specific Cardiac Dynamic Model", "Physical Intervention Instruments", "Personalized Percutaneous Coronary Intervention Surgery Simulation System", "Medical Simulators", "Desktop Intervention Simulators", "Training", "Surgery", "Solid Modeling", "Data Models", "Computational Modeling", "Deformable Models", "Strain", "Virtual Surgery", "Personalized Surgery Simulation", "Dynamic Modeling", "Haptic Force Feedback Hardware", "Cardiovascular Intervention" ], "authors": [ { "givenName": "Shuai", "surname": "Li", "fullName": "Shuai Li", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jiahao", "surname": "Cui", "fullName": "Jiahao Cui", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Aimin", "surname": "Hao", "fullName": "Aimin Hao", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, 
Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Shuyang", "surname": "Zhang", "fullName": "Shuyang Zhang", "affiliation": "Peking Union Medical College Hospital, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Qinping", "surname": "Zhao", "fullName": "Qinping Zhao", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4150-4160", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2013/2869/0/06671780", "title": "Image-guided simulation of heterogeneous tissue deformation for augmented reality during hepatic surgery", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671780/12OmNAtK4hi", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2015/8020/0/07450404", "title": "Novel, Robust, and Efficient Guidewire Modeling for PCI Surgery Simulator Based on Heterogeneous and Integrated Chain-Mails", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2015/07450404/12OmNC2fGtO", "parentPublication": { "id": "proceedings/cad-graphics/2015/8020/0", "title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2017/4283/0/4283a083", "title": "Cardiac Surgery Rehabilitation System (CSRS) for a Personalized Support to Patients", "doi": null, "abstractUrl": 
"/proceedings-article/sitis/2017/4283a083/12OmNCga1NG", "parentPublication": { "id": "proceedings/sitis/2017/4283/0", "title": "2017 13th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hisb/2012/4921/0/4921a031", "title": "Predicting Complications of Percutaneous Coronary Intervention Using a Novel Support Vector Method", "doi": null, "abstractUrl": "/proceedings-article/hisb/2012/4921a031/12OmNvSKO0m", "parentPublication": { "id": "proceedings/hisb/2012/4921/0", "title": "Healthcare Informatics, Imaging and Systems Biology, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1999/01/v0062", "title": "Real-Time Elastic Deformations of Soft Tissues for Surgery Simulation", "doi": null, "abstractUrl": "/journal/tg/1999/01/v0062/13rRUxBa5nf", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/05/06987340", "title": "Impact of Soft Tissue Heterogeneity on Augmented Reality for Liver Surgery", "doi": null, "abstractUrl": "/journal/tg/2015/05/06987340/13rRUyuegp8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmbs/2022/6770/0/677000a234", "title": "Predicting the Onset of Delirium on Hourly Basis in an Intensive Care Unit Following Cardiac Surgery", "doi": null, "abstractUrl": "/proceedings-article/cmbs/2022/677000a234/1GhW5drgeRy", "parentPublication": { "id": "proceedings/cmbs/2022/6770/0", "title": "2022 IEEE 35th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a168", "title": "Research on Interaction of Exposure Operation in Virtual Surgery", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a168/1ap5ASodZde", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a354", "title": "Kinetic Simulation of Cardiac Motion with Patient-Specific Coronary Artery Vessels Attached for PCI Simulator", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a354/1ap5BWOVCzm", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2019/1867/0/08983143", "title": "Predicting 30 days Mortality in STEMI Patients using Patient Referral Data to a Primary Percutaneous Coronary Intervention Service", "doi": null, "abstractUrl": "/proceedings-article/bibm/2019/08983143/1hguhrPs6vS", "parentPublication": { "id": "proceedings/bibm/2019/1867/0", "title": "2019 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523889", "articleId": "1wpqwAIMiRy", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523844", "articleId": "1wpqmnzDSzm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2FpASLv1u", "name": "ttg202111-09523834s1-supp1-3106478.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523834s1-supp1-3106478.mp4", "extension": "mp4", "size": "52.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] 
}
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqmnzDSzm", "doi": "10.1109/TVCG.2021.3106511", "abstract": "Intrinsic projector calibration is essential in projection mapping (PM) applications, especially in dynamic PM. However, due to the shallow depth-of-field (DOF) of a projector, more work is needed to ensure accurate calibration. We aim to estimate the intrinsic parameters of a projector while avoiding the limitation of shallow DOF. As the core of our technique, we present a practical calibration device that requires a minimal working volume directly in front of the projector lens regardless of the projector&#x0027;s focusing distance and aperture size. The device consists of a flat-bed scanner and pinhole-array masks. For calibration, a projector projects a series of structured light patterns in the device. The pinholes directionally decompose the structured light, and only the projected rays that pass through the pinholes hit the scanner plane. For each pinhole, we extract a ray passing through the optical center of the projector. Consequently, we regard the projector as a pinhole projector that projects the extracted rays only, and we calibrate the projector by applying the standard camera calibration technique, which assumes a pinhole camera model. Using a proof-of-concept prototype, we demonstrate that our technique can calibrate projectors with different focusing distances and aperture sizes at the same accuracy as a conventional method. 
Finally, we confirm that our technique can provide intrinsic parameters accurate enough for a dynamic PM application, even when a projector is placed too far from a projection target for a conventional method to calibrate the projector using a fiducial object of reasonable size.", "abstracts": [ { "abstractType": "Regular", "content": "Intrinsic projector calibration is essential in projection mapping (PM) applications, especially in dynamic PM. However, due to the shallow depth-of-field (DOF) of a projector, more work is needed to ensure accurate calibration. We aim to estimate the intrinsic parameters of a projector while avoiding the limitation of shallow DOF. As the core of our technique, we present a practical calibration device that requires a minimal working volume directly in front of the projector lens regardless of the projector&#x0027;s focusing distance and aperture size. The device consists of a flat-bed scanner and pinhole-array masks. For calibration, a projector projects a series of structured light patterns in the device. The pinholes directionally decompose the structured light, and only the projected rays that pass through the pinholes hit the scanner plane. For each pinhole, we extract a ray passing through the optical center of the projector. Consequently, we regard the projector as a pinhole projector that projects the extracted rays only, and we calibrate the projector by applying the standard camera calibration technique, which assumes a pinhole camera model. Using a proof-of-concept prototype, we demonstrate that our technique can calibrate projectors with different focusing distances and aperture sizes at the same accuracy as a conventional method. 
Finally, we confirm that our technique can provide intrinsic parameters accurate enough for a dynamic PM application, even when a projector is placed too far from a projection target for a conventional method to calibrate the projector using a fiducial object of reasonable size.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Intrinsic projector calibration is essential in projection mapping (PM) applications, especially in dynamic PM. However, due to the shallow depth-of-field (DOF) of a projector, more work is needed to ensure accurate calibration. We aim to estimate the intrinsic parameters of a projector while avoiding the limitation of shallow DOF. As the core of our technique, we present a practical calibration device that requires a minimal working volume directly in front of the projector lens regardless of the projector's focusing distance and aperture size. The device consists of a flat-bed scanner and pinhole-array masks. For calibration, a projector projects a series of structured light patterns in the device. The pinholes directionally decompose the structured light, and only the projected rays that pass through the pinholes hit the scanner plane. For each pinhole, we extract a ray passing through the optical center of the projector. Consequently, we regard the projector as a pinhole projector that projects the extracted rays only, and we calibrate the projector by applying the standard camera calibration technique, which assumes a pinhole camera model. Using a proof-of-concept prototype, we demonstrate that our technique can calibrate projectors with different focusing distances and aperture sizes at the same accuracy as a conventional method. 
Finally, we confirm that our technique can provide intrinsic parameters accurate enough for a dynamic PM application, even when a projector is placed too far from a projection target for a conventional method to calibrate the projector using a fiducial object of reasonable size.", "title": "Directionally Decomposing Structured Light for Projector Calibration", "normalizedTitle": "Directionally Decomposing Structured Light for Projector Calibration", "fno": "09523844", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Calibration", "Cameras", "Image Reconstruction", "Lenses", "Optical Projectors", "Practical Calibration Device", "Projector Lens", "Pinhole Projector", "Standard Camera Calibration Technique", "Intrinsic Projector Calibration", "Calibration", "Cameras", "Lenses", "Focusing", "Apertures", "Shape", "Prototypes", "Projector Calibration", "Projection Mapping", "Spatial Augmented Reality" ], "authors": [ { "givenName": "Masatoki", "surname": "Sugimoto", "fullName": "Masatoki Sugimoto", "affiliation": "Graduate School of Engineering Science, Osaka University, Osaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Daisuke", "surname": "Iwai", "fullName": "Daisuke Iwai", "affiliation": "Graduate School of Engineering Science, Osaka University, Osaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Koki", "surname": "Ishida", "fullName": "Koki Ishida", "affiliation": "Graduate School of Engineering Science, Osaka University, Osaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Parinya", "surname": "Punpongsanon", "fullName": "Parinya Punpongsanon", "affiliation": "Graduate School of Engineering Science, Osaka University, Osaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Kosuke", "surname": "Sato", "fullName": "Kosuke Sato", "affiliation": "Graduate School of Engineering Science, Osaka University, Osaka, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, 
"showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4161-4170", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2016/3641/0/3641a063", "title": "Practical and Precise Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/ismar/2016/3641a063/12OmNB7cjhR", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981781", "title": "Simultaneous self-calibration of a projector and a camera using structured light", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981781/12OmNBzRNuv", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/1/212810014", "title": "Auto-Calibration of Multi-Projector Display Walls", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212810014/12OmNCb3fwi", "parentPublication": { "id": "proceedings/icpr/2004/2128/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a464", "title": "Simple, Accurate, and Robust Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2007/1179/0/04270475", "title": "Projector Calibration using Arbitrary Planes and Calibrated Camera", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270475/12OmNxYtu7r", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d568", "title": "Active One-Shot Scan for Wide Depth Range Using a Light Field Projector Based on Coded Aperture", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d568/12OmNxdm4Cp", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457a125", "title": "A Practical Method for Fully Automatic Intrinsic Camera Calibration Using Directionally Encoded Light", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457a125/12OmNxxdZCj", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a320", "title": "Active Calibration of Camera-Projector Systems Based on Planar Homography", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446433", "title": "A Calibration Method for Large-Scale Projection Based Floor Display System", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446433/13bd1gJ1v0M", "parentPublication": { 
"id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1101", "title": "Asynchronous Distributed Calibration for Scalable and Reconfigurable Multi-Projector Displays", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1101/13rRUwInvJ9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523834", "articleId": "1wpqnjOfx60", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523846", "articleId": "1wpqw9G3Lws", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2FoDyjIpq", "name": "ttg202111-09523844s1-supp1-3106511.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523844s1-supp1-3106511.mp4", "extension": "mp4", "size": "25.4 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqw9G3Lws", "doi": "10.1109/TVCG.2021.3106431", "abstract": "With the continuing development of affordable immersive virtual reality (VR) systems, there is now a growing market for consumer content. The current form of consumer systems is not dissimilar to the lab-based VR systems of the past 30 years: the primary input mechanism is a head-tracked display and one or two tracked hands with buttons and joysticks on hand-held controllers. Over those 30 years, a very diverse academic literature has emerged that covers design and ergonomics of 3D user interfaces (3DUIs). However, the growing consumer market has engaged a very broad range of creatives that have built a very diverse set of designs. Sometimes these designs adopt findings from the academic literature, but other times they experiment with completely novel or counter-intuitive mechanisms. In this paper and its online adjunct, we report on novel 3DUI design patterns that are interesting from both design and research perspectives: they are highly novel, potentially broadly re-usable and/or suggest interesting avenues for evaluation. The supplemental material, which is a living document, is a crowd-sourced repository of interesting patterns. This paper is a curated snapshot of those patterns that were considered to be the most fruitful for further elaboration.", "abstracts": [ { "abstractType": "Regular", "content": "With the continuing development of affordable immersive virtual reality (VR) systems, there is now a growing market for consumer content. 
The current form of consumer systems is not dissimilar to the lab-based VR systems of the past 30 years: the primary input mechanism is a head-tracked display and one or two tracked hands with buttons and joysticks on hand-held controllers. Over those 30 years, a very diverse academic literature has emerged that covers design and ergonomics of 3D user interfaces (3DUIs). However, the growing consumer market has engaged a very broad range of creatives that have built a very diverse set of designs. Sometimes these designs adopt findings from the academic literature, but other times they experiment with completely novel or counter-intuitive mechanisms. In this paper and its online adjunct, we report on novel 3DUI design patterns that are interesting from both design and research perspectives: they are highly novel, potentially broadly re-usable and/or suggest interesting avenues for evaluation. The supplemental material, which is a living document, is a crowd-sourced repository of interesting patterns. This paper is a curated snapshot of those patterns that were considered to be the most fruitful for further elaboration.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the continuing development of affordable immersive virtual reality (VR) systems, there is now a growing market for consumer content. The current form of consumer systems is not dissimilar to the lab-based VR systems of the past 30 years: the primary input mechanism is a head-tracked display and one or two tracked hands with buttons and joysticks on hand-held controllers. Over those 30 years, a very diverse academic literature has emerged that covers design and ergonomics of 3D user interfaces (3DUIs). However, the growing consumer market has engaged a very broad range of creatives that have built a very diverse set of designs. Sometimes these designs adopt findings from the academic literature, but other times they experiment with completely novel or counter-intuitive mechanisms. 
In this paper and its online adjunct, we report on novel 3DUI design patterns that are interesting from both design and research perspectives: they are highly novel, potentially broadly re-usable and/or suggest interesting avenues for evaluation. The supplemental material, which is a living document, is a crowd-sourced repository of interesting patterns. This paper is a curated snapshot of those patterns that were considered to be the most fruitful for further elaboration.", "title": "Directions for 3D User Interface Research from Consumer VR Games", "normalizedTitle": "Directions for 3D User Interface Research from Consumer VR Games", "fno": "09523846", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Games", "Ergonomics", "Interactive Devices", "User Interfaces", "Virtual Reality", "Tracked Hands", "Buttons", "Joysticks", "Hand Held Controllers", "Ergonomics", "3 D User Interfaces", "Consumer Market", "Counter Intuitive Mechanisms", "Consumer VR Games", "Affordable Immersive Virtual Reality Systems", "Consumer Content", "Consumer Systems", "Lab Based VR Systems", "Primary Input Mechanism", "Head Tracked Display", "3 DUI Design Patterns", "Games", "User Interfaces", "Three Dimensional Displays", "Control Systems", "Visualization", "Standards", "Guidelines", "Virtual Reality", "3 D User Interfaces", "Games", "Interaction Patterns", "Consumer Head Mounted Displays" ], "authors": [ { "givenName": "Anthony", "surname": "Steed", "fullName": "Anthony Steed", "affiliation": "Department of Computer Science, University College London, UK", "__typename": "ArticleAuthorType" }, { "givenName": "Tuukka M.", "surname": "Takala", "fullName": "Tuukka M. 
Takala", "affiliation": "Aalto University, Finland", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Archer", "fullName": "Daniel Archer", "affiliation": "Department of Computer Science, University College London, UK", "__typename": "ArticleAuthorType" }, { "givenName": "Wallace", "surname": "Lages", "fullName": "Wallace Lages", "affiliation": "School of Visual Arts, Virginia Tech., USA", "__typename": "ArticleAuthorType" }, { "givenName": "Robert W.", "surname": "Lindeman", "fullName": "Robert W. Lindeman", "affiliation": "HIT Lab NZ, University of Canterbury, New Zealand", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4171-4182", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892348", "title": "Steering locomotion by vestibular perturbation in room-scale VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446619", "title": "Touchless Haptic Feedback for VR Rhythm Games", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446619/13bd1fKQxqX", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446277", "title": "Batmen Forever: Unified Virtual Hand Metaphor for Consumer VR Setups", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446277/13bd1fWcuD9", "parentPublication": 
{ "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a247", "title": "From attention to action: Key drivers to augment VR experience for everyday consumer applications", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a247/1CJelwYgfOE", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2022/9519/0/951900a360", "title": "VR Empathy Game: Creating Empathic VR Environments for Children Based on a Social Constructivist Learning Approach", "doi": null, "abstractUrl": "/proceedings-article/icalt/2022/951900a360/1FUUbmWjuLu", "parentPublication": { "id": "proceedings/icalt/2022/9519/0", "title": "2022 International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a538", "title": "CardsVR: A Two-Person VR Experience with Passive Haptic Feedback from a Deck of Playing Cards", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a538/1JrRaySJ7So", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2020/8432/0/843200a071", "title": "Evaluation of Graphical User Interfaces Guidelines for Virtual Reality Games", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2020/843200a071/1pQIKqrlF0k", "parentPublication": { "id": "proceedings/sbgames/2020/8432/0", "title": "2020 19th Brazilian Symposium on Computer Games 
and Digital Entertainment (SBGames)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a458", "title": "Play with Emotional Characters: Improving User Emotional Experience by A Data-driven Approach in VR Volleyball Games", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a458/1tnWZju755K", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a434", "title": "A-Visor and A-Camera: Arduino-based Cardboard Head-Mounted Controllers for VR Games", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a434/1tnWy6iYjMk", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428306", "title": "The Impact of Black Edge Artifact on User Experience for the Interactive Cloud VR Services", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428306/1uilNtz7Mha", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523844", "articleId": "1wpqmnzDSzm", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523836", "articleId": "1wpquR1qr1S", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpquR1qr1S", "doi": "10.1109/TVCG.2021.3106488", "abstract": "Virtual reality (VR) applications require high-performance rendering algorithms to efficiently render 3D scenes on the VR head-mounted display, to provide users with an immersive and interactive virtual environment. Foveated rendering provides a solution to improve the performance of rendering algorithms by allocating computing resources to different regions based on the human visual acuity, and renders images of different qualities in different regions. Rasterization-based methods and ray tracing methods can be directly applied to foveated rendering, but rasterization-based methods are difficult to estimate global illumination (GI), and ray tracing methods are inefficient for rendering scenes that contain paths with low probability. Photon mapping is an efficient GI rendering method for scenes with different materials. However, since photon mapping cannot dynamically adjust the rendering quality of GI according to the human acuity, it cannot be directly applied to foveated rendering. In this paper, we propose a foveated photon mapping method to render realistic GI effects in the foveal region. We use the foveated photon tracing method to generate photons with high density in the foveal region, and these photons are used to render high-quality images in the foveal region. We further propose a temporal photon management to select and update the valid foveated photons of the previous frame for improving our method&#x0027;s performance. 
Our method can render diffuse, specular, glossy and transparent materials to achieve effects specifically related to GI, such as color bleeding, specular reflection, glossy reflection and caustics. Our method supports dynamic scenes and renders high-quality GI in the foveal region at interactive rates.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual reality (VR) applications require high-performance rendering algorithms to efficiently render 3D scenes on the VR head-mounted display, to provide users with an immersive and interactive virtual environment. Foveated rendering provides a solution to improve the performance of rendering algorithms by allocating computing resources to different regions based on the human visual acuity, and renders images of different qualities in different regions. Rasterization-based methods and ray tracing methods can be directly applied to foveated rendering, but rasterization-based methods are difficult to estimate global illumination (GI), and ray tracing methods are inefficient for rendering scenes that contain paths with low probability. Photon mapping is an efficient GI rendering method for scenes with different materials. However, since photon mapping cannot dynamically adjust the rendering quality of GI according to the human acuity, it cannot be directly applied to foveated rendering. In this paper, we propose a foveated photon mapping method to render realistic GI effects in the foveal region. We use the foveated photon tracing method to generate photons with high density in the foveal region, and these photons are used to render high-quality images in the foveal region. We further propose a temporal photon management to select and update the valid foveated photons of the previous frame for improving our method&#x0027;s performance. 
Our method can render diffuse, specular, glossy and transparent materials to achieve effects specifically related to GI, such as color bleeding, specular reflection, glossy reflection and caustics. Our method supports dynamic scenes and renders high-quality GI in the foveal region at interactive rates.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual reality (VR) applications require high-performance rendering algorithms to efficiently render 3D scenes on the VR head-mounted display, to provide users with an immersive and interactive virtual environment. Foveated rendering provides a solution to improve the performance of rendering algorithms by allocating computing resources to different regions based on the human visual acuity, and renders images of different qualities in different regions. Rasterization-based methods and ray tracing methods can be directly applied to foveated rendering, but rasterization-based methods are difficult to estimate global illumination (GI), and ray tracing methods are inefficient for rendering scenes that contain paths with low probability. Photon mapping is an efficient GI rendering method for scenes with different materials. However, since photon mapping cannot dynamically adjust the rendering quality of GI according to the human acuity, it cannot be directly applied to foveated rendering. In this paper, we propose a foveated photon mapping method to render realistic GI effects in the foveal region. We use the foveated photon tracing method to generate photons with high density in the foveal region, and these photons are used to render high-quality images in the foveal region. We further propose a temporal photon management to select and update the valid foveated photons of the previous frame for improving our method's performance. 
Our method can render diffuse, specular, glossy and transparent materials to achieve effects specifically related to GI, such as color bleeding, specular reflection, glossy reflection and caustics. Our method supports dynamic scenes and renders high-quality GI in the foveal region at interactive rates.", "title": "Foveated Photon Mapping", "normalizedTitle": "Foveated Photon Mapping", "fno": "09523836", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Image Sensors", "Ray Tracing", "Rendering Computer Graphics", "Virtual Reality", "Virtual Reality Applications", "High Performance Rendering Algorithms", "VR Head Mounted Display", "Immersive Environment", "Interactive Virtual Environment", "Foveated Rendering", "Renders Images", "Rasterization Based Methods", "Ray Tracing Methods", "Rendering Scenes", "Efficient GI Rendering Method", "Rendering Quality", "Foveated Photon Mapping Method", "Foveal Region", "Foveated Photon Tracing Method", "High Quality Images", "Temporal Photon Management", "Valid Foveated Photons", "Renders High Quality GI", "Photonics", "Rendering Computer Graphics", "Visualization", "Three Dimensional Displays", "Lighting", "Spatial Resolution", "Ray Tracing", "Virtual Reality", "Foveated Rendering", "Photon Mapping" ], "authors": [ { "givenName": "Xuehuai", "surname": "Shi", "fullName": "Xuehuai Shi", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lili", "surname": "Wang", "fullName": "Lili Wang", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaoheng", "surname": "Wei", "fullName": "Xiaoheng Wei", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University, Beijing, China", 
"__typename": "ArticleAuthorType" }, { "givenName": "Ling-Qi", "surname": "Yan", "fullName": "Ling-Qi Yan", "affiliation": "University of California, Santa Barbara, California, U.S.", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4183-4193", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iiki/2016/5952/0/5952a022", "title": "Distributed Global Illumination Method Based on Photon Mapping", "doi": null, "abstractUrl": "/proceedings-article/iiki/2016/5952a022/12OmNBubOQf", "parentPublication": { "id": "proceedings/iiki/2016/5952/0", "title": "2016 International Conference on Identification, Information and Knowledge in the Internet of Things (IIKI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/etvis/2016/4731/0/07851170", "title": "An analysis of eye-tracking data in foveated ray tracing", "doi": null, "abstractUrl": "/proceedings-article/etvis/2016/07851170/12OmNvT2pjL", "parentPublication": { "id": "proceedings/etvis/2016/4731/0", "title": "2016 IEEE Second Workshop on Eye Tracking and Visualization (ETVIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2017/2937/0/2937a444", "title": "Towards Interactive and Realistic Rendering of 3D Fetal Ultrasound via Photon Mapping", "doi": null, "abstractUrl": "/proceedings-article/ism/2017/2937a444/12OmNwNwzLh", "parentPublication": { "id": "proceedings/ism/2017/2937/0", "title": "2017 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07534852", "title": "Correlated Photon Mapping for Interactive Global Illumination of 
Time-Varying Volumetric Data", "doi": null, "abstractUrl": "/journal/tg/2017/01/07534852/13rRUxZ0o1E", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122364", "title": "Historygrams: Enabling Interactive Global Illumination in Direct Volume Rendering using Photon Mapping", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122364/13rRUyYjK5h", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/08/ttg2013081317", "title": "Real-Time Volume Rendering in Dynamic Lighting Environments Using Precomputed Photon Mapping", "doi": null, "abstractUrl": "/journal/tg/2013/08/ttg2013081317/13rRUynHuja", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a756", "title": "Rectangular Mapping-based Foveated Rendering", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a756/1CJcj9wHjH2", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a471", "title": "Locomotion-aware Foveated Rendering", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a471/1MNgzzb0RWg", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a001", "title": "Foveated 
Instant Radiosity", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a001/1pysxhw4Bqw", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a413", "title": "Selective Foveated Ray Tracing for Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523846", "articleId": "1wpqw9G3Lws", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523842", "articleId": "1wpqr1B6wA8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2Ftho8O5y", "name": "ttg202111-09523836s1-supp1-3106488.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523836s1-supp1-3106488.mp4", "extension": "mp4", "size": "62.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqr1B6wA8", "doi": "10.1109/TVCG.2021.3106433", "abstract": "Computer-generated holographic (CGH) displays show great potential and are emerging as the next-generation displays for augmented and virtual reality, and automotive heads-up displays. One of the critical problems harming the wide adoption of such displays is the presence of speckle noise inherent to holography, that compromises its quality by introducing perceptible artifacts. Although speckle noise suppression has been an active research area, the previous works have not considered the perceptual characteristics of the Human Visual System (HVS), which receives the final displayed imagery. However, it is well studied that the sensitivity of the HVS is not uniform across the visual field, which has led to gaze-contingent rendering schemes for maximizing the perceptual quality in various computer-generated imagery. Inspired by this, we present the first method that reduces the &#x201C;perceived speckle noise&#x201D; by integrating foveal and peripheral vision characteristics of the HVS, along with the retinal point spread function, into the phase hologram computation. Specifically, we introduce the anatomical and statistical retinal receptor distribution into our computational hologram optimization, which places a higher priority on reducing the perceived foveal speckle noise while being adaptable to any individual&#x0027;s optical aberration on the retina. Our method demonstrates superior perceptual quality on our emulated holographic display. 
Our evaluations with objective measurements and subjective studies demonstrate a significant reduction of the human perceived noise.", "abstracts": [ { "abstractType": "Regular", "content": "Computer-generated holographic (CGH) displays show great potential and are emerging as the next-generation displays for augmented and virtual reality, and automotive heads-up displays. One of the critical problems harming the wide adoption of such displays is the presence of speckle noise inherent to holography, that compromises its quality by introducing perceptible artifacts. Although speckle noise suppression has been an active research area, the previous works have not considered the perceptual characteristics of the Human Visual System (HVS), which receives the final displayed imagery. However, it is well studied that the sensitivity of the HVS is not uniform across the visual field, which has led to gaze-contingent rendering schemes for maximizing the perceptual quality in various computer-generated imagery. Inspired by this, we present the first method that reduces the &#x201C;perceived speckle noise&#x201D; by integrating foveal and peripheral vision characteristics of the HVS, along with the retinal point spread function, into the phase hologram computation. Specifically, we introduce the anatomical and statistical retinal receptor distribution into our computational hologram optimization, which places a higher priority on reducing the perceived foveal speckle noise while being adaptable to any individual&#x0027;s optical aberration on the retina. Our method demonstrates superior perceptual quality on our emulated holographic display. 
Our evaluations with objective measurements and subjective studies demonstrate a significant reduction of the human perceived noise.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Computer-generated holographic (CGH) displays show great potential and are emerging as the next-generation displays for augmented and virtual reality, and automotive heads-up displays. One of the critical problems harming the wide adoption of such displays is the presence of speckle noise inherent to holography, that compromises its quality by introducing perceptible artifacts. Although speckle noise suppression has been an active research area, the previous works have not considered the perceptual characteristics of the Human Visual System (HVS), which receives the final displayed imagery. However, it is well studied that the sensitivity of the HVS is not uniform across the visual field, which has led to gaze-contingent rendering schemes for maximizing the perceptual quality in various computer-generated imagery. Inspired by this, we present the first method that reduces the “perceived speckle noise” by integrating foveal and peripheral vision characteristics of the HVS, along with the retinal point spread function, into the phase hologram computation. Specifically, we introduce the anatomical and statistical retinal receptor distribution into our computational hologram optimization, which places a higher priority on reducing the perceived foveal speckle noise while being adaptable to any individual's optical aberration on the retina. Our method demonstrates superior perceptual quality on our emulated holographic display. 
Our evaluations with objective measurements and subjective studies demonstrate a significant reduction of the human perceived noise.", "title": "Gaze-Contingent Retinal Speckle Suppression for Perceptually-Matched Foveated Holographic Displays", "normalizedTitle": "Gaze-Contingent Retinal Speckle Suppression for Perceptually-Matched Foveated Holographic Displays", "fno": "09523842", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Biomedical Optical Imaging", "Computer Generated Holography", "Eye", "Holography", "Image Enhancement", "Image Reconstruction", "Image Resolution", "Medical Image Processing", "Optical Transfer Function", "Rendering Computer Graphics", "Speckle", "Virtual Reality", "Vision", "Visual Perception", "Gaze Contingent Retinal Speckle Suppression", "Perceptually Matched Foveated Holographic Displays", "Computer Generated Holographic Displays", "Next Generation Displays", "Augmented Reality", "Virtual Reality", "Automotive Heads Up", "Perceptible Artifacts", "Speckle Noise Suppression", "Perceptual Characteristics", "Human Visual System", "HVS", "Final Displayed Imagery", "Visual Field", "Gaze Contingent Rendering Schemes", "Computer Generated Imagery", "Perceived Speckle Noise", "Foveal Vision Characteristics", "Peripheral Vision Characteristics", "Retinal Point Spread Function", "Phase Hologram Computation", "Statistical Retinal Receptor Distribution", "Computational Hologram Optimization", "Perceived Foveal Speckle Noise", "Superior Perceptual Quality", "Emulated Holographic Display", "Human Perceived Noise", "Speckle", "Retina", "Image Quality", "Holography", "Phase Modulation", "Image Reconstruction", "Visualization", "Holograms", "Foveated Rendering", "Near Eye Immersive Displays" ], "authors": [ { "givenName": "Praneeth", "surname": "Chakravarthula", "fullName": "Praneeth Chakravarthula", "affiliation": "University of North Carolina, Chapel Hill, NC, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Zhan", "surname": "Zhang", 
"fullName": "Zhan Zhang", "affiliation": "University of Science and Technology of China, Hefei, China", "__typename": "ArticleAuthorType" }, { "givenName": "Okan", "surname": "Tursun", "fullName": "Okan Tursun", "affiliation": "Università della Svizzera italiana, Lugano, Switzerland", "__typename": "ArticleAuthorType" }, { "givenName": "Piotr", "surname": "Didyk", "fullName": "Piotr Didyk", "affiliation": "Università della Svizzera italiana, Lugano, Switzerland", "__typename": "ArticleAuthorType" }, { "givenName": "Qi", "surname": "Sun", "fullName": "Qi Sun", "affiliation": "New York University, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Henry", "surname": "Fuchs", "fullName": "Henry Fuchs", "affiliation": "University of North Carolina, Chapel Hill, NC, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4194-4203", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccms/2010/5642/2/05421106", "title": "SAR Speckle Reduction Based on Nonlocal Means Method", "doi": null, "abstractUrl": "/proceedings-article/iccms/2010/05421106/12OmNBhpS5P", "parentPublication": { "id": "proceedings/iccms/2010/5642/2", "title": "2010 Second International Conference on Computer Modeling and Simulation (ICCMS 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/is3c/2016/3071/0/3071a829", "title": "A Blind Estimation for Speckle Noise Based on Gaussian-Hermite Moments", "doi": null, "abstractUrl": "/proceedings-article/is3c/2016/3071a829/12OmNBr4etJ", "parentPublication": { "id": "proceedings/is3c/2016/3071/0", "title": "2016 International Symposium on Computer, Consumer and Control (IS3C)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imvip/2007/2887/0/28870201", "title": "Speckle reduction using the discrete Fourier filtering technique", "doi": null, "abstractUrl": "/proceedings-article/imvip/2007/28870201/12OmNC4eSz7", "parentPublication": { "id": "proceedings/imvip/2007/2887/0", "title": "2007 International Machine Vision and Image Processing Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bmei/2008/3118/2/3118b510", "title": "Speckle Noise Reduction of Ultrasound Images Using M-band Wavelet Transform and Wiener Filter in a Homomorphic Framework", "doi": null, "abstractUrl": "/proceedings-article/bmei/2008/3118b510/12OmNqHqSnj", "parentPublication": { "id": "proceedings/bmei/2008/3118/2", "title": "BioMedical Engineering and Informatics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoip/2010/4252/1/4252a135", "title": "A Speckle Reduction and Characteristic Enhancement Algorithm to Ultrasonic Image Based on Wavelet Technology", "doi": null, "abstractUrl": "/proceedings-article/icoip/2010/4252a135/12OmNqJq4BI", "parentPublication": { "id": "proceedings/icoip/2010/4252/2", "title": "Optoelectronics and Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2000/0862/0/08620245", "title": "Classification and Estimation of Ultrasound Speckle Noise with Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/bibe/2000/08620245/12OmNvsm6AZ", "parentPublication": { "id": "proceedings/bibe/2000/0862/0", "title": "13th IEEE International Conference on BioInformatics and BioEngineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/1991/0003/0/00150105", "title": "Restoration of speckle-degraded 
images using bispectra", "doi": null, "abstractUrl": "/proceedings-article/icassp/1991/00150105/12OmNwtn3pn", "parentPublication": { "id": "proceedings/icassp/1991/0003/0", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicse/2009/4027/0/4027a122", "title": "Speckle Noise Suppression Techniques for Ultrasound Images", "doi": null, "abstractUrl": "/proceedings-article/icicse/2009/4027a122/12OmNz5apEs", "parentPublication": { "id": "proceedings/icicse/2009/4027/0", "title": "2009 Fourth International Conference on Internet Computing for Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcis/2009/3571/4/3571d523", "title": "Speckle Noise Filtering for Sea SAR Image", "doi": null, "abstractUrl": "/proceedings-article/gcis/2009/3571d523/12OmNzICETH", "parentPublication": { "id": "proceedings/gcis/2009/3571/4", "title": "2009 WRI Global Congress on Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956194", "title": "Speckle Image Restoration without Clean Data", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956194/1IHqaphQgZG", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523836", "articleId": "1wpquR1qr1S", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523894", "articleId": "1wpqkPb7CSY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqkPb7CSY", "doi": "10.1109/TVCG.2021.3106513", "abstract": "Common existing head-mounted displays (HMDs) for virtual reality (VR) provide users with a high presence and embodiment. However, the field of view (FoV) of a typical HMD for VR is about 90 to 110 [deg] in the diagonal direction and about 70 to 90 [deg] in the vertical direction, which is narrower than that of humans. Specifically, the downward FoV of conventional HMDs is too narrow to present the user avatar&#x0027;s body and feet. To address this problem, we have developed a novel HMD with a pair of additional display units to increase the downward FoV by approximately 60 (<inline-formula><tex-math notation=\"LaTeX\">Z_$10+50$_Z</tex-math></inline-formula>) [deg]. We comprehensively investigated the effects of the increased downward FoV on the sense of immersion that includes presence, sense of self-location (SoSL), sense of agency (SoA), and sense of body ownership (SoBO) during VR experience and on patterns of head movements and cybersickness as its secondary effects. As a result, it was clarified that the HMD with an increased FoV improved presence and SoSL. Also, it was confirmed that the user could see the object below with a head movement pattern close to the real behavior, and did not suffer from cybersickness. Moreover, the effect of the increased downward FoV on SoBO and SoA was limited since it was easier to perceive the misalignment between the real and virtual bodies.", "abstracts": [ { "abstractType": "Regular", "content": "Common existing head-mounted displays (HMDs) for virtual reality (VR) provide users with a high presence and embodiment. 
However, the field of view (FoV) of a typical HMD for VR is about 90 to 110 [deg] in the diagonal direction and about 70 to 90 [deg] in the vertical direction, which is narrower than that of humans. Specifically, the downward FoV of conventional HMDs is too narrow to present the user avatar&#x0027;s body and feet. To address this problem, we have developed a novel HMD with a pair of additional display units to increase the downward FoV by approximately 60 (<inline-formula><tex-math notation=\"LaTeX\">$10+50$</tex-math><alternatives><graphic position=\"float\" orientation=\"portrait\" xlink:href=\"27tvcg11-nakano-3106513-eqinline-1-small.tif\"/></alternatives></inline-formula>) [deg]. We comprehensively investigated the effects of the increased downward FoV on the sense of immersion that includes presence, sense of self-location (SoSL), sense of agency (SoA), and sense of body ownership (SoBO) during VR experience and on patterns of head movements and cybersickness as its secondary effects. As a result, it was clarified that the HMD with an increased FoV improved presence and SoSL. Also, it was confirmed that the user could see the object below with a head movement pattern close to the real behavior, and did not suffer from cybersickness. Moreover, the effect of the increased downward FoV on SoBO and SoA was limited since it was easier to perceive the misalignment between the real and virtual bodies.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Common existing head-mounted displays (HMDs) for virtual reality (VR) provide users with a high presence and embodiment. However, the field of view (FoV) of a typical HMD for VR is about 90 to 110 [deg] in the diagonal direction and about 70 to 90 [deg] in the vertical direction, which is narrower than that of humans. Specifically, the downward FoV of conventional HMDs is too narrow to present the user avatar's body and feet. 
To address this problem, we have developed a novel HMD with a pair of additional display units to increase the downward FoV by approximately 60 (-) [deg]. We comprehensively investigated the effects of the increased downward FoV on the sense of immersion that includes presence, sense of self-location (SoSL), sense of agency (SoA), and sense of body ownership (SoBO) during VR experience and on patterns of head movements and cybersickness as its secondary effects. As a result, it was clarified that the HMD with an increased FoV improved presence and SoSL. Also, it was confirmed that the user could see the object below with a head movement pattern close to the real behavior, and did not suffer from cybersickness. Moreover, the effect of the increased downward FoV on SoBO and SoA was limited since it was easier to perceive the misalignment between the real and virtual bodies.", "title": "Head-Mounted Display with Increased Downward Field of View Improves Presence and Sense of Self-Location", "normalizedTitle": "Head-Mounted Display with Increased Downward Field of View Improves Presence and Sense of Self-Location", "fno": "09523894", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Avatars", "Helmet Mounted Displays", "Head Mounted Display", "Virtual Reality", "Diagonal Direction", "Vertical Direction", "User Avatar", "HMD", "VR Experience", "Head Movements", "Fo V Improved Presence", "Head Movement Pattern", "Real Bodies", "Virtual Bodies", "Increased Downward Field Of View", "So SL", "Sense Of Self Location", "Sense Of Agency", "Sense Of Body Ownership", "Avatars", "Resists", "Foot", "Lenses", "Legged Locomotion", "Cybersickness", "Visualization", "Downward Field Of View", "Virtual Avatar", "Presence", "Sense Of Self Location", "Head Mounted Displays" ], "authors": [ { "givenName": "Kizashi", "surname": "Nakano", "fullName": "Kizashi Nakano", "affiliation": "Nara Institute of Science and Technology, Nara, Japan", "__typename": "ArticleAuthorType" }, { "givenName": 
"Naoya", "surname": "Isoyama", "fullName": "Naoya Isoyama", "affiliation": "Nara Institute of Science and Technology, Nara, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Diego", "surname": "Monteiro", "fullName": "Diego Monteiro", "affiliation": "Xi'an Jiaotong-Liverpool University, Suzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Nobuchika", "surname": "Sakata", "fullName": "Nobuchika Sakata", "affiliation": "Ryukoku University, Kyoto, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Kiyoshi", "surname": "Kiyokawa", "fullName": "Kiyoshi Kiyokawa", "affiliation": "Nara Institute of Science and Technology, Nara, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Takuji", "surname": "Narumi", "fullName": "Takuji Narumi", "affiliation": "The University of Tokyo, Bunkyo City, Tokyo, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4204-4214", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892245", "title": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892245/12OmNwkR5tU", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2008/2047/0/04476604", "title": "Poster: Sliding Viewport for Head Mounted Displays in Interactive Environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476604/12OmNzdoMAW", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User 
Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446139", "title": "The Effect of Immersive Displays on Situation Awareness in Virtual Environments for Aerial Firefighting Air Attack Supervisor Training", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446139/13bd1AIBM1Q", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09786815", "title": "Analyzing the Effect of Diverse Gaze and Head Direction on Facial Expression Recognition with Photo-Reflective Sensors Embedded in a Head-Mounted Display", "doi": null, "abstractUrl": "/journal/tg/5555/01/09786815/1DSumaVNxG8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09850416", "title": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics", "doi": null, "abstractUrl": "/journal/tg/5555/01/09850416/1Fz4SPLVTMY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798320", "title": "Evaluating Dynamic Characteristics of Head Mounted Display in Parallel Movement with Simultaneous Subjective Observation Method", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798320/1cJ0TRvTuOk", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797852", 
"title": "Perception of Volumetric Characters&#x0027; Eye-Gaze Direction in Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797852/1cJ0UskDCRa", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2019/2297/0/229700a001", "title": "Development of Easy Attachable Biological Information Measurement Device for Various Head Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/cw/2019/229700a001/1fHkmnjJYru", "parentPublication": { "id": "proceedings/cw/2019/2297/0", "title": "2019 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089608", "title": "Angular Dependence of the Spatial Resolution in Virtual Reality Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089608/1jIxaeEdNkc", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/02/09146716", "title": "Volumetric Head-Mounted Display With Locally Adaptive Focal Blocks", "doi": null, "abstractUrl": "/journal/tg/2022/02/09146716/1lHjPSqVrpK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523842", "articleId": "1wpqr1B6wA8", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523895", "articleId": "1wpqwrI9ISA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2FlyxpzvG", "name": 
"ttg202111-09523894s1-supp1-3106513.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523894s1-supp1-3106513.mp4", "extension": "mp4", "size": "87.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqwrI9ISA", "doi": "10.1109/TVCG.2021.3106493", "abstract": "We explore the design of Hand Proximate User Interfaces (HPUIs) for head-mounted displays (HMDs) to facilitate near-body interactions with the display directly projected on, or around the user&#x0027;s hand. We focus on single-handed input, while taking into consideration the hand anatomy which distorts naturally when the user interacts with the display. Through two user studies, we explore the potential for discrete as well as continuous input. For discrete input, HPUIs favor targets that are directly on the fingers (as opposed to off-finger) as they offer tactile feedback. We demonstrate that continuous interaction is also possible, and is as effective on the fingers as in the off-finger space between the index finger and thumb. We also find that with continuous input, content is more easily controlled when the interaction occurs in the vertical or horizontal axes, and less with diagonal movements. We conclude with applications and recommendations for the design of future HPUIs.", "abstracts": [ { "abstractType": "Regular", "content": "We explore the design of Hand Proximate User Interfaces (HPUIs) for head-mounted displays (HMDs) to facilitate near-body interactions with the display directly projected on, or around the user&#x0027;s hand. We focus on single-handed input, while taking into consideration the hand anatomy which distorts naturally when the user interacts with the display. Through two user studies, we explore the potential for discrete as well as continuous input. 
For discrete input, HPUIs favor targets that are directly on the fingers (as opposed to off-finger) as they offer tactile feedback. We demonstrate that continuous interaction is also possible, and is as effective on the fingers as in the off-finger space between the index finger and thumb. We also find that with continuous input, content is more easily controlled when the interaction occurs in the vertical or horizontal axes, and less with diagonal movements. We conclude with applications and recommendations for the design of future HPUIs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We explore the design of Hand Proximate User Interfaces (HPUIs) for head-mounted displays (HMDs) to facilitate near-body interactions with the display directly projected on, or around the user's hand. We focus on single-handed input, while taking into consideration the hand anatomy which distorts naturally when the user interacts with the display. Through two user studies, we explore the potential for discrete as well as continuous input. For discrete input, HPUIs favor targets that are directly on the fingers (as opposed to off-finger) as they offer tactile feedback. We demonstrate that continuous interaction is also possible, and is as effective on the fingers as in the off-finger space between the index finger and thumb. We also find that with continuous input, content is more easily controlled when the interaction occurs in the vertical or horizontal axes, and less with diagonal movements. 
We conclude with applications and recommendations for the design of future HPUIs.", "title": "HPUI: Hand Proximate User Interfaces for One-Handed Interactions on Head Mounted Displays", "normalizedTitle": "HPUI: Hand Proximate User Interfaces for One-Handed Interactions on Head Mounted Displays", "fno": "09523895", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Haptic Interfaces", "Helmet Mounted Displays", "HPUI", "Hand Proximate User Interfaces", "One Handed Interactions", "Head Mounted Displays", "Near Body Interactions", "Single Handed Input", "Hand Anatomy", "Continuous Input", "Discrete Input", "Continuous Interaction", "User Interfaces", "Task Analysis", "Strain", "Visualization", "Virtual Reality", "Shape", "Man Machine Systems", "On Hand Projected Interfaces", "Deformable U Is", "Virtual Reality" ], "authors": [ { "givenName": "Shariff AM", "surname": "Faleel", "fullName": "Shariff AM Faleel", "affiliation": "University of Manitoba, Winnipeg, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Gammon", "fullName": "Michael Gammon", "affiliation": "University of Manitoba, Winnipeg, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Kevin", "surname": "Fan", "fullName": "Kevin Fan", "affiliation": "Human-Machine Interaction Lab, Huawei, Markham, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Da-Yuan", "surname": "Huang", "fullName": "Da-Yuan Huang", "affiliation": "Human-Machine Interaction Lab, Huawei, Markham, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Wei", "surname": "Li", "fullName": "Wei Li", "affiliation": "Human-Machine Interaction Lab, Huawei, Markham, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Pourang", "surname": "Irani", "fullName": "Pourang Irani", "affiliation": "University of Manitoba, Winnipeg, Canada", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, 
"issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4215-4225", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icat/2013/11/0/06728904", "title": "A proposal of two-handed multi-finger haptic interface with rotary frame", "doi": null, "abstractUrl": "/proceedings-article/icat/2013/06728904/12OmNyen1lo", "parentPublication": { "id": "proceedings/icat/2013/11/0", "title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446217", "title": "Effects of Image Size and Structural Complexity on Time and Precision of Hand Movements in Head Mounted Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446217/13bd1AITn9W", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/04/08263123", "title": "MRTouch: Adding Touch Input to Head-Mounted Mixed Reality", "doi": null, "abstractUrl": "/journal/tg/2018/04/08263123/13rRUyft7D9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699201", "title": "SWAG Demo: Smart Watch Assisted Gesture Interaction for Mixed Reality Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699201/19F1VvOVhew", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a582", "title": "Multi-Touch Smartphone-Based Progressive Refinement VR Selection", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a582/1CJcBfmyX5K", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a646", "title": "A Pinch-based Text Entry Method for Head-mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a074", "title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089533", "title": "HiPad: Text entry for Head-Mounted Displays Using Circular Touchpad", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089533/1jIx7JtSOTC", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090681", "title": "Accuracy of Commodity Finger Tracking Systems for Virtual Reality Head-Mounted Displays", "doi": null, 
"abstractUrl": "/proceedings-article/vrw/2020/09090681/1jIxoZtoPlK", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a519", "title": "TapID: Rapid Touch Interaction in Virtual Reality using Wearable Sensing", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a519/1tuBtNYt0LC", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523894", "articleId": "1wpqkPb7CSY", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523838", "articleId": "1wpqsbFen3G", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqsbFen3G", "doi": "10.1109/TVCG.2021.3106505", "abstract": "Mobile AR applications benefit from fast initialization to display world-locked effects instantly. However, standard visual odometry or SLAM algorithms require motion parallax to initialize (see <xref ref-type=\"fig\" rid=\"fig1\">Figure 1</xref>) and, therefore, suffer from delayed initialization. In this paper, we present a 6-DoF monocular visual odometry that initializes instantly and without motion parallax. Our main contribution is a pose estimator that decouples estimating the 5-DoF relative rotation and translation direction from the 1-DoF translation magnitude. While scale is not observable in a monocular vision-only setting, it is still paramount to estimate a <italic>consistent</italic> scale over the whole trajectory (even if not physically accurate) to avoid AR effects moving erroneously along depth. In our approach, we leverage the fact that depth errors are not perceivable to the user during rotation-only motion. However, as the user starts translating the device, depth becomes perceivable and so does the capability to estimate consistent scale. Our proposed algorithm naturally transitions between these two modes. Our second contribution is a novel residual in the relative pose problem to further improve the results. The residual combines the Jacobians of the functional and the functional itself and is minimized using a Levenberg&#x2013;Marquardt optimizer on the 5-DoF manifold. We perform extensive validations of our contributions with both a publicly available dataset and synthetic data. 
We show that the proposed pose estimator outperforms the classical approaches for 6-DoF pose estimation used in the literature in low-parallax configurations. Likewise, we show our relative pose estimator outperforms state-of-the-art approaches in an odometry pipeline configuration where we can leverage initial guesses. We release a dataset for the relative pose problem using real data to facilitate the comparison with future solutions for the relative pose problem. Our solution is either used as a full odometry or as a pre-SLAM component of any supported SLAM system (ARKit, ARCore) in world-locked AR effects on platforms such as Instagram and Facebook.", "abstracts": [ { "abstractType": "Regular", "content": "Mobile AR applications benefit from fast initialization to display world-locked effects instantly. However, standard visual odometry or SLAM algorithms require motion parallax to initialize (see <xref ref-type=\"fig\" rid=\"fig1\">Figure 1</xref>) and, therefore, suffer from delayed initialization. In this paper, we present a 6-DoF monocular visual odometry that initializes instantly and without motion parallax. Our main contribution is a pose estimator that decouples estimating the 5-DoF relative rotation and translation direction from the 1-DoF translation magnitude. While scale is not observable in a monocular vision-only setting, it is still paramount to estimate a <italic>consistent</italic> scale over the whole trajectory (even if not physically accurate) to avoid AR effects moving erroneously along depth. In our approach, we leverage the fact that depth errors are not perceivable to the user during rotation-only motion. However, as the user starts translating the device, depth becomes perceivable and so does the capability to estimate consistent scale. Our proposed algorithm naturally transitions between these two modes. Our second contribution is a novel residual in the relative pose problem to further improve the results. 
The residual combines the Jacobians of the functional and the functional itself and is minimized using a Levenberg&#x2013;Marquardt optimizer on the 5-DoF manifold. We perform extensive validations of our contributions with both a publicly available dataset and synthetic data. We show that the proposed pose estimator outperforms the classical approaches for 6-DoF pose estimation used in the literature in low-parallax configurations. Likewise, we show our relative pose estimator outperforms state-of-the-art approaches in an odometry pipeline configuration where we can leverage initial guesses. We release a dataset for the relative pose problem using real data to facilitate the comparison with future solutions for the relative pose problem. Our solution is either used as a full odometry or as a pre-SLAM component of any supported SLAM system (ARKit, ARCore) in world-locked AR effects on platforms such as Instagram and Facebook.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Mobile AR applications benefit from fast initialization to display world-locked effects instantly. However, standard visual odometry or SLAM algorithms require motion parallax to initialize (see Figure 1) and, therefore, suffer from delayed initialization. In this paper, we present a 6-DoF monocular visual odometry that initializes instantly and without motion parallax. Our main contribution is a pose estimator that decouples estimating the 5-DoF relative rotation and translation direction from the 1-DoF translation magnitude. While scale is not observable in a monocular vision-only setting, it is still paramount to estimate a consistent scale over the whole trajectory (even if not physically accurate) to avoid AR effects moving erroneously along depth. In our approach, we leverage the fact that depth errors are not perceivable to the user during rotation-only motion. 
However, as the user starts translating the device, depth becomes perceivable and so does the capability to estimate consistent scale. Our proposed algorithm naturally transitions between these two modes. Our second contribution is a novel residual in the relative pose problem to further improve the results. The residual combines the Jacobians of the functional and the functional itself and is minimized using a Levenberg–Marquardt optimizer on the 5-DoF manifold. We perform extensive validations of our contributions with both a publicly available dataset and synthetic data. We show that the proposed pose estimator outperforms the classical approaches for 6-DoF pose estimation used in the literature in low-parallax configurations. Likewise, we show our relative pose estimator outperforms state-of-the-art approaches in an odometry pipeline configuration where we can leverage initial guesses. We release a dataset for the relative pose problem using real data to facilitate the comparison with future solutions for the relative pose problem. 
Our solution is either used as a full odometry or as a pre-SLAM component of any supported SLAM system (ARKit, ARCore) in world-locked AR effects on platforms such as Instagram and Facebook.", "title": "Instant Visual Odometry Initialization for Mobile AR", "normalizedTitle": "Instant Visual Odometry Initialization for Mobile AR", "fno": "09523838", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Distance Measurement", "Image Sensors", "Image Sequences", "Kalman Filters", "Mobile Robots", "Pose Estimation", "Robot Vision", "SLAM Robots", "Depth Errors", "Rotation Only Motion", "Consistent Scale", "Relative Pose Problem", "5 Do F Manifold", "Low Parallax Configurations", "Relative Pose Estimator", "Odometry Pipeline Configuration", "Leverage Initial Guesses", "World Locked AR Effects", "Instant Visual Odometry Initialization", "Mobile AR Applications", "Fast Initialization", "World Locked Effects", "Standard Visual Odometry", "SLAM Algorithms", "Motion Parallax", "Delayed Initialization", "6 Do F Monocular Visual Odometry", "Initializes", "5 Do F Relative Rotation", "Translation Direction", "1 Do F Translation Magnitude", "Monocular Vision Only", "Cameras", "Simultaneous Localization And Mapping", "Transmission Line Matrix Methods", "Sensors", "Feature Extraction", "Visual Odometry", "Tracking", "Monocular Initialization", "Relative Pose Estimator", "Visual Odometry", "AR Instant Placement" ], "authors": [ { "givenName": "Alejo", "surname": "Concha", "fullName": "Alejo Concha", "affiliation": "Facebook, Zurich, Switzerland", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Burri", "fullName": "Michael Burri", "affiliation": "Facebook, Zurich, Switzerland", "__typename": "ArticleAuthorType" }, { "givenName": "Jesús", "surname": "Briales", "fullName": "Jesús Briales", "affiliation": "Facebook, Zurich, Switzerland", "__typename": "ArticleAuthorType" }, { "givenName": "Christian", "surname": "Forster", "fullName": "Christian Forster", 
"affiliation": "Facebook, Zurich, Switzerland", "__typename": "ArticleAuthorType" }, { "givenName": "Luc", "surname": "Oth", "fullName": "Luc Oth", "affiliation": "Facebook, Zurich, Switzerland", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4226-4235", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2017/2610/0/261001a155", "title": "GSLAM: Initialization-Robust Monocular Visual SLAM via Global Structure-from-Motion", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a155/12OmNAJ4peW", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/psivt/2010/4285/0/4285a121", "title": "Modeling of Unbounded Long-Range Drift in Visual Odometry", "doi": null, "abstractUrl": "/proceedings-article/psivt/2010/4285a121/12OmNB6UIc9", "parentPublication": { "id": "proceedings/psivt/2010/4285/0", "title": "Image and Video Technology, Pacific-Rim Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2014/4258/0/4258a227", "title": "A Fast Feature Tracking Algorithm for Visual Odometry and Mapping Based on RGB-D Sensors", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2014/4258a227/12OmNxbEtOb", "parentPublication": { "id": "proceedings/sibgrapi/2014/4258/0", "title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbrlarsrobocontrol/2014/6711/0/07024256", "title": "A Fast Visual Odometry and Mapping System for 
RGB-D Cameras", "doi": null, "abstractUrl": "/proceedings-article/sbrlarsrobocontrol/2014/07024256/12OmNylboJA", "parentPublication": { "id": "proceedings/sbrlarsrobocontrol/2014/6711/0", "title": "2014 Joint Conference on Robotics: SBR-LARS Robotics Symposium and Robocontrol (SBR LARS Robocontrol)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbase/2021/2709/0/270900a654", "title": "Stereo Visual Odometry with Information Enhancement at Feature Points", "doi": null, "abstractUrl": "/proceedings-article/icbase/2021/270900a654/1AH8aY1BiV2", "parentPublication": { "id": "proceedings/icbase/2021/2709/0", "title": "2021 2nd International Conference on Big Data & Artificial Intelligence & Software Engineering (ICBASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10086694", "title": "SDV-LOAM: Semi-Direct Visual-LiDAR Odometry and Mapping", "doi": null, "abstractUrl": "/journal/tp/5555/01/10086694/1LUpwXZtAe4", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/4.803E306", "title": "Unsupervised Collaborative Learning of Keyframe Detection and Visual Odometry Towards Monocular Deep SLAM", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/4.803E306/1hQqtAaoUes", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2020/9891/0/09108693", "title": "Depth Prediction for Monocular Direct Visual Odometry", "doi": null, "abstractUrl": "/proceedings-article/crv/2020/09108693/1kpIGiAFaYo", "parentPublication": { "id": "proceedings/crv/2020/9891/0", "title": "2020 17th Conference on Computer 
and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150639", "title": "Dynamic Attention-based Visual Odometry", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150639/1lPHvonwPTO", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800e928", "title": "Information-Driven Direct RGB-D Odometry", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800e928/1m3osoTCN44", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523895", "articleId": "1wpqwrI9ISA", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523845", "articleId": "1wpqkYgQZd6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2Fs2wOWqs", "name": "ttg202111-09523838s1-supp1-3106505.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523838s1-supp1-3106505.mp4", "extension": "mp4", "size": "52.6 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqkYgQZd6", "doi": "10.1109/TVCG.2021.3106434", "abstract": "Proper occlusion based rendering is very important to achieve realism in all indoor and outdoor Augmented Reality (AR) applications. This paper addresses the problem of fast and accurate dynamic occlusion reasoning by real objects in the scene for large scale outdoor AR applications. Conceptually, proper occlusion reasoning requires an estimate of depth for every point in augmented scene which is technically hard to achieve for outdoor scenarios, especially in the presence of moving objects. We propose a method to detect and automatically infer the depth for real objects in the scene without explicit detailed scene modeling and depth sensing (e.g. without using sensors such as 3D-LiDAR). Specifically, we employ instance segmentation of color image data to detect real dynamic objects in the scene and use either a top-down terrain elevation model or deep learning based monocular depth estimation model to infer their metric distance from the camera for proper occlusion reasoning in real time. The realized solution is implemented in a low latency real-time framework for video-see-though AR and is directly extendable to optical-see-through AR. We minimize latency in depth reasoning and occlusion rendering by doing semantic object tracking and prediction in video frames.", "abstracts": [ { "abstractType": "Regular", "content": "Proper occlusion based rendering is very important to achieve realism in all indoor and outdoor Augmented Reality (AR) applications. This paper addresses the problem of fast and accurate dynamic occlusion reasoning by real objects in the scene for large scale outdoor AR applications. 
Conceptually, proper occlusion reasoning requires an estimate of depth for every point in augmented scene which is technically hard to achieve for outdoor scenarios, especially in the presence of moving objects. We propose a method to detect and automatically infer the depth for real objects in the scene without explicit detailed scene modeling and depth sensing (e.g. without using sensors such as 3D-LiDAR). Specifically, we employ instance segmentation of color image data to detect real dynamic objects in the scene and use either a top-down terrain elevation model or deep learning based monocular depth estimation model to infer their metric distance from the camera for proper occlusion reasoning in real time. The realized solution is implemented in a low latency real-time framework for video-see-though AR and is directly extendable to optical-see-through AR. We minimize latency in depth reasoning and occlusion rendering by doing semantic object tracking and prediction in video frames.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Proper occlusion based rendering is very important to achieve realism in all indoor and outdoor Augmented Reality (AR) applications. This paper addresses the problem of fast and accurate dynamic occlusion reasoning by real objects in the scene for large scale outdoor AR applications. Conceptually, proper occlusion reasoning requires an estimate of depth for every point in augmented scene which is technically hard to achieve for outdoor scenarios, especially in the presence of moving objects. We propose a method to detect and automatically infer the depth for real objects in the scene without explicit detailed scene modeling and depth sensing (e.g. without using sensors such as 3D-LiDAR). 
Specifically, we employ instance segmentation of color image data to detect real dynamic objects in the scene and use either a top-down terrain elevation model or deep learning based monocular depth estimation model to infer their metric distance from the camera for proper occlusion reasoning in real time. The realized solution is implemented in a low latency real-time framework for video-see-though AR and is directly extendable to optical-see-through AR. We minimize latency in depth reasoning and occlusion rendering by doing semantic object tracking and prediction in video frames.", "title": "Long-Range Augmented Reality with Dynamic Occlusion Rendering", "normalizedTitle": "Long-Range Augmented Reality with Dynamic Occlusion Rendering", "fno": "09523845", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Cameras", "Computer Vision", "Image Colour Analysis", "Image Segmentation", "Inference Mechanisms", "Learning Artificial Intelligence", "Object Detection", "Object Tracking", "Optical Radar", "Rendering Computer Graphics", "Accurate Dynamic Occlusion Reasoning", "Scale Outdoor AR Applications", "Proper Occlusion Reasoning", "Augmented Scene", "Outdoor Scenarios", "Moving Objects", "Explicit Detailed Scene Modeling", "Dynamic Objects", "Terrain Elevation Model", "Monocular Depth Estimation Model", "Depth Reasoning", "Semantic Object Tracking", "Long Range Augmented Reality", "Dynamic Occlusion Rendering", "Proper Occlusion Based Rendering", "Rendering Computer Graphics", "Cognition", "Cameras", "Real Time Systems", "Image Segmentation", "Estimation", "Navigation", "Augmented Reality", "Occlusion Reasoning", "Depth Inference", "Object Tracking" ], "authors": [ { "givenName": "Mikhail", "surname": "Sizintsev", "fullName": "Mikhail Sizintsev", "affiliation": "SRI International, Princeton, NJ, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Niluthpol Chowdhury", "surname": "Mithun", "fullName": "Niluthpol Chowdhury Mithun", 
"affiliation": "SRI International, Princeton, NJ, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Han-Pang", "surname": "Chiu", "fullName": "Han-Pang Chiu", "affiliation": "SRI International, Princeton, NJ, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Supun", "surname": "Samarasekera", "fullName": "Supun Samarasekera", "affiliation": "SRI International, Princeton, NJ, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Rakesh", "surname": "Kumar", "fullName": "Rakesh Kumar", "affiliation": "SRI International, Princeton, NJ, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4236-4244", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/bracis/2015/0016/0/0016a128", "title": "An Occlusion Calculus Based on an Interval Algebra", "doi": null, "abstractUrl": "/proceedings-article/bracis/2015/0016a128/12OmNBU1jDV", "parentPublication": { "id": "proceedings/bracis/2015/0016/0", "title": "2015 Brazilian Conference on Intelligent Systems (BRACIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2015/6683/0/6683a618", "title": "AR-Weapon: Live Augmented Reality Based First-Person Shooting System", "doi": null, "abstractUrl": "/proceedings-article/wacv/2015/6683a618/12OmNqH9hoT", "parentPublication": { "id": "proceedings/wacv/2015/6683/0", "title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2016/3641/0/3641a054", "title": "Edge Snapping-Based Depth Enhancement for Dynamic Occlusion Handling in Augmented Reality", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2016/3641a054/12OmNrFTr6j", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391e417", "title": "Large Displacement 3D Scene Flow with Occlusion Reasoning", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391e417/12OmNwoxSc6", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2010/9343/0/05643558", "title": "Foreground and shadow occlusion handling for outdoor augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643558/12OmNyRPgDK", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2011/06/mcg2011060068", "title": "Nonpinhole Approximations for Interactive Rendering", "doi": null, "abstractUrl": "/magazine/cg/2011/06/mcg2011060068/13rRUxly9gf", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a298", "title": "Real-Time Augmented Reality with Occlusion Handling Based on RGBD Images", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a298/1ap5xY9dZni", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798025", "title": 
"Occlusion Management in VR: A Comparative Study", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798025/1cJ1f6V69wY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998139", "title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a246", "title": "Occlusion Handling in Outdoor Augmented Reality using a Combination of Map Data and Instance Segmentation", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a246/1yeQZ7zP5ks", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523838", "articleId": "1wpqsbFen3G", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523840", "articleId": "1wpqvrW88O4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2FmMrGS7m", "name": "ttg202111-09523845s1-supp1-3106434.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523845s1-supp1-3106434.mp4", "extension": "mp4", "size": "52.2 MB", "__typename": "WebExtraType" }, { "id": "1y2FmtH1Tl6", "name": "ttg202111-09523845s1-supp2-3106434.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523845s1-supp2-3106434.mp4", "extension": 
"mp4", "size": "21.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqvrW88O4", "doi": "10.1109/TVCG.2021.3106491", "abstract": "We present a novel online 3D scanning system for high-quality object reconstruction with a mobile device, called Mobile3DScanner. Using a mobile device equipped with an embedded RGBD camera, our system provides online 3D object reconstruction capability for users to acquire high-quality textured 3D object models. Starting with a simultaneous pose tracking and TSDF fusion module, our system allows users to scan an object with a mobile device to get a 3D model for real-time preview. After the real-time scanning process is completed, the scanned 3D model is globally optimized and mapped with multi-view textures as an efficient postprocess to get the final textured 3D model on the mobile device. Unlike most existing state-of-the-art systems which can only scan homeware objects such as toys with small dimensions due to the limited computation and memory resources of mobile platforms, our system can reconstruct objects with large dimensions such as statues. We propose a novel visual-inertial ICP approach to achieve real-time accurate 6DoF pose tracking of each incoming frame on the front end, while maintaining a keyframe pool on the back end where the keyframe poses are optimized by local BA. Simultaneously, the keyframe depth maps are fused by the optimized poses to a TSDF model in real-time. Especially, we propose a novel adaptive voxel resizing strategy to solve the out-of-memory problem of large dimension TSDF fusion on mobile platforms. 
In the post-process, the keyframe poses are globally optimized and the keyframe depth maps are optimized and fused to obtain a final object model with more accurate geometry. The experiments with quantitative and qualitative evaluation demonstrate the effectiveness of the proposed 3D scanning system based on a mobile device, which can successfully achieve online high-quality 3D reconstruction of natural objects with larger dimensions for efficient AR content creation.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel online 3D scanning system for high-quality object reconstruction with a mobile device, called Mobile3DScanner. Using a mobile device equipped with an embedded RGBD camera, our system provides online 3D object reconstruction capability for users to acquire high-quality textured 3D object models. Starting with a simultaneous pose tracking and TSDF fusion module, our system allows users to scan an object with a mobile device to get a 3D model for real-time preview. After the real-time scanning process is completed, the scanned 3D model is globally optimized and mapped with multi-view textures as an efficient postprocess to get the final textured 3D model on the mobile device. Unlike most existing state-of-the-art systems which can only scan homeware objects such as toys with small dimensions due to the limited computation and memory resources of mobile platforms, our system can reconstruct objects with large dimensions such as statues. We propose a novel visual-inertial ICP approach to achieve real-time accurate 6DoF pose tracking of each incoming frame on the front end, while maintaining a keyframe pool on the back end where the keyframe poses are optimized by local BA. Simultaneously, the keyframe depth maps are fused by the optimized poses to a TSDF model in real-time. Especially, we propose a novel adaptive voxel resizing strategy to solve the out-of-memory problem of large dimension TSDF fusion on mobile platforms. 
In the post-process, the keyframe poses are globally optimized and the keyframe depth maps are optimized and fused to obtain a final object model with more accurate geometry. The experiments with quantitative and qualitative evaluation demonstrate the effectiveness of the proposed 3D scanning system based on a mobile device, which can successfully achieve online high-quality 3D reconstruction of natural objects with larger dimensions for efficient AR content creation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel online 3D scanning system for high-quality object reconstruction with a mobile device, called Mobile3DScanner. Using a mobile device equipped with an embedded RGBD camera, our system provides online 3D object reconstruction capability for users to acquire high-quality textured 3D object models. Starting with a simultaneous pose tracking and TSDF fusion module, our system allows users to scan an object with a mobile device to get a 3D model for real-time preview. After the real-time scanning process is completed, the scanned 3D model is globally optimized and mapped with multi-view textures as an efficient postprocess to get the final textured 3D model on the mobile device. Unlike most existing state-of-the-art systems which can only scan homeware objects such as toys with small dimensions due to the limited computation and memory resources of mobile platforms, our system can reconstruct objects with large dimensions such as statues. We propose a novel visual-inertial ICP approach to achieve real-time accurate 6DoF pose tracking of each incoming frame on the front end, while maintaining a keyframe pool on the back end where the keyframe poses are optimized by local BA. Simultaneously, the keyframe depth maps are fused by the optimized poses to a TSDF model in real-time. Especially, we propose a novel adaptive voxel resizing strategy to solve the out-of-memory problem of large dimension TSDF fusion on mobile platforms. 
In the post-process, the keyframe poses are globally optimized and the keyframe depth maps are optimized and fused to obtain a final object model with more accurate geometry. The experiments with quantitative and qualitative evaluation demonstrate the effectiveness of the proposed 3D scanning system based on a mobile device, which can successfully achieve online high-quality 3D reconstruction of natural objects with larger dimensions for efficient AR content creation.", "title": "Mobile3DScanner: An Online 3D Scanner for High-quality Object Reconstruction with a Mobile Device", "normalizedTitle": "Mobile3DScanner: An Online 3D Scanner for High-quality Object Reconstruction with a Mobile Device", "fno": "09523840", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cameras", "Image Reconstruction", "Image Texture", "Pose Estimation", "Solid Modelling", "Stereo Image Processing", "Online 3 D Scanner", "High Quality Object Reconstruction", "Mobile Device", "Real Time Scanning Process", "Final Textured 3 D Model", "Mobile Platforms", "Real Time Accurate 6 Do F Pose Tracking", "Keyframe Poses", "Final Object Model", "High Quality 3 D Reconstruction", "Pose Tracking TSDF Fusion Module", "Mobile 3 D Scanner", "Online 3 D Scanning System", "Embedded RGBD Camera", "High Quality Textured 3 D Object Models", "Real Time Preview", "Multiview Textures", "Homeware Objects", "Limited Computation", "Memory Resources", "Visual Inertial ICP Approach", "Keyframe Pool", "Keyframe Depth Maps", "Adaptive Voxel Resizing", "Out Of Memory Problem", "Large Dimension TSDF Fusion", "Quantitative Evaluation", "Qualitative Evaluation", "Efficient AR Content Creation", "Three Dimensional Displays", "Cameras", "Real Time Systems", "Solid Modeling", "Mobile Handsets", "Image Reconstruction", "Computational Modeling", "Object Scanning", "3 D Reconstruction", "Visual Inertial Pose Tracking", "Adaptive Voxel Resizing" ], "authors": [ { "givenName": "Xiaojun", "surname": "Xiang", "fullName": "Xiaojun 
Xiang", "affiliation": "SenseTime Research and Tetras, Hong Kong, AI, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Hanqing", "surname": "Jiang", "fullName": "Hanqing Jiang", "affiliation": "SenseTime Research, Hong Kong, AI, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Guofeng", "surname": "Zhang", "fullName": "Guofeng Zhang", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, 310058, P. R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Yihao", "surname": "Yu", "fullName": "Yihao Yu", "affiliation": "SenseTime Research and Tetras, Hong Kong, AI, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Chenchen", "surname": "Li", "fullName": "Chenchen Li", "affiliation": "SenseTime Research, Hong Kong, AI, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Xingbin", "surname": "Yang", "fullName": "Xingbin Yang", "affiliation": "SenseTime Research and Tetras, Hong Kong, AI, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Danpeng", "surname": "Chen", "fullName": "Danpeng Chen", "affiliation": "SenseTime Research and Tetras, Hong Kong, AI, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Hujun", "surname": "Bao", "fullName": "Hujun Bao", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, 310058, P. R. 
China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4245-4255", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2014/4985/0/06836118", "title": "Fast dense 3D reconstruction using an adaptive multiscale discrete-continuous variational method", "doi": null, "abstractUrl": "/proceedings-article/wacv/2014/06836118/12OmNBTs7oB", "parentPublication": { "id": "proceedings/wacv/2014/4985/0", "title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836532", "title": "Robust Keyframe-Based Monocular SLAM for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836532/12OmNx5GU8K", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2013/2840/0/2840c800", "title": "A Flexible Scene Representation for 3D Reconstruction Using an RGB-D Camera", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840c800/12OmNyLiuEW", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/02/07833201", "title": "Surface Reconstruction via Fusing Sparse-Sequence of Depth Images", "doi": null, "abstractUrl": "/journal/tg/2018/02/07833201/13rRUx0gezW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & 
Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acit/2018/0385/0/08672679", "title": "Design of an Automated 3D Scanner", "doi": null, "abstractUrl": "/proceedings-article/acit/2018/08672679/18IpjjmLjgc", "parentPublication": { "id": "proceedings/acit/2018/0385/0", "title": "2018 International Arab Conference on Information Technology (ACIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2018/8497/0/849700a001", "title": "Keyframe-Based Texture Mapping for RGBD Human Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2018/849700a001/1a3x6hGWsso", "parentPublication": { "id": "proceedings/icvrv/2018/8497/0", "title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a211", "title": "Blended-Keyframes for Mobile Mediated Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a211/1gysoMThT0Y", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/08/09007740", "title": "Variational Level Set Evolution for Non-Rigid 3D Reconstruction From a Single Depth Camera", "doi": null, "abstractUrl": "/journal/tp/2021/08/09007740/1hGqrsQbjPO", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09201064", "title": "Mobile3DRecon: Real-time Monocular 3D Reconstruction on a Mobile Phone", "doi": null, "abstractUrl": 
"/journal/tg/2020/12/09201064/1niUpdweh2g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900i928", "title": "DI-Fusion: Online Implicit 3D Reconstruction with Deep Priors", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900i928/1yeLpskgFXi", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523845", "articleId": "1wpqkYgQZd6", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523847", "articleId": "1wpqmNfLX9e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2Fu2OhVnO", "name": "ttg202111-09523840s1-supp2-3106491.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523840s1-supp2-3106491.mp4", "extension": "mp4", "size": "81 MB", "__typename": "WebExtraType" }, { "id": "1y2FuKcmr5e", "name": "ttg202111-09523840s1-supp1-3106491.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523840s1-supp1-3106491.mp4", "extension": "mp4", "size": "102 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqmNfLX9e", "doi": "10.1109/TVCG.2021.3106486", "abstract": "Stereoscopic projection mapping (PM) allows a user to see a three-dimensional (3D) computer-generated (CG) object floating over physical surfaces of arbitrary shapes around us using projected imagery. However, the current stereoscopic PM technology only satisfies binocular cues and is not capable of providing correct focus cues, which causes a vergence-accommodation conflict (VAC). Therefore, we propose a multifocal approach to mitigate VAC in stereoscopic PM. Our primary technical contribution is to attach electrically focus-tunable lenses (ETLs) to active shutter glasses to control both vergence and accommodation. Specifically, we apply fast and periodical focal sweeps to the ETLs, which causes the &#x201c;virtual image&#x201d; (as an optical term) of a scene observed through the ETLs to move back and forth during each sweep period. A 3D CG object is projected from a synchronized high-speed projector only when the virtual image of the projected imagery is located at a desired distance. This provides an observer with the correct focus cues required. In this study, we solve three technical issues that are unique to stereoscopic PM: <xref ref-type=\"disp-formula\" rid=\"deqn1\">(1)</xref> The 3D CG object is displayed on non-planar and even moving surfaces; <xref ref-type=\"disp-formula\" rid=\"deqn2\">(2)</xref> the physical surfaces need to be shown without the focus modulation; <xref ref-type=\"disp-formula\" rid=\"deqn3\">(3)</xref> the shutter glasses additionally need to be synchronized with the ETLs and the projector. 
We also develop a novel compensation technique to deal with the &#x201c;lens breathing&#x201d; artifact that varies the retinal size of the virtual image through focal length modulation. Further, using a proof-of-concept prototype, we demonstrate that our technique can present the virtual image of a target 3D CG object at the correct depth. Finally, we validate the advantage provided by our technique by comparing it with conventional stereoscopic PM using a user study on a depth-matching task.", "abstracts": [ { "abstractType": "Regular", "content": "Stereoscopic projection mapping (PM) allows a user to see a three-dimensional (3D) computer-generated (CG) object floating over physical surfaces of arbitrary shapes around us using projected imagery. However, the current stereoscopic PM technology only satisfies binocular cues and is not capable of providing correct focus cues, which causes a vergence-accommodation conflict (VAC). Therefore, we propose a multifocal approach to mitigate VAC in stereoscopic PM. Our primary technical contribution is to attach electrically focus-tunable lenses (ETLs) to active shutter glasses to control both vergence and accommodation. Specifically, we apply fast and periodical focal sweeps to the ETLs, which causes the &#x201c;virtual image&#x201d; (as an optical term) of a scene observed through the ETLs to move back and forth during each sweep period. A 3D CG object is projected from a synchronized high-speed projector only when the virtual image of the projected imagery is located at a desired distance. This provides an observer with the correct focus cues required. 
In this study, we solve three technical issues that are unique to stereoscopic PM: <xref ref-type=\"disp-formula\" rid=\"deqn1\">(1)</xref> The 3D CG object is displayed on non-planar and even moving surfaces; <xref ref-type=\"disp-formula\" rid=\"deqn2\">(2)</xref> the physical surfaces need to be shown without the focus modulation; <xref ref-type=\"disp-formula\" rid=\"deqn3\">(3)</xref> the shutter glasses additionally need to be synchronized with the ETLs and the projector. We also develop a novel compensation technique to deal with the &#x201c;lens breathing&#x201d; artifact that varies the retinal size of the virtual image through focal length modulation. Further, using a proof-of-concept prototype, we demonstrate that our technique can present the virtual image of a target 3D CG object at the correct depth. Finally, we validate the advantage provided by our technique by comparing it with conventional stereoscopic PM using a user study on a depth-matching task.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Stereoscopic projection mapping (PM) allows a user to see a three-dimensional (3D) computer-generated (CG) object floating over physical surfaces of arbitrary shapes around us using projected imagery. However, the current stereoscopic PM technology only satisfies binocular cues and is not capable of providing correct focus cues, which causes a vergence-accommodation conflict (VAC). Therefore, we propose a multifocal approach to mitigate VAC in stereoscopic PM. Our primary technical contribution is to attach electrically focus-tunable lenses (ETLs) to active shutter glasses to control both vergence and accommodation. Specifically, we apply fast and periodical focal sweeps to the ETLs, which causes the “virtual image” (as an optical term) of a scene observed through the ETLs to move back and forth during each sweep period. 
A 3D CG object is projected from a synchronized high-speed projector only when the virtual image of the projected imagery is located at a desired distance. This provides an observer with the correct focus cues required. In this study, we solve three technical issues that are unique to stereoscopic PM: (1) The 3D CG object is displayed on non-planar and even moving surfaces; (2) the physical surfaces need to be shown without the focus modulation; (3) the shutter glasses additionally need to be synchronized with the ETLs and the projector. We also develop a novel compensation technique to deal with the “lens breathing” artifact that varies the retinal size of the virtual image through focal length modulation. Further, using a proof-of-concept prototype, we demonstrate that our technique can present the virtual image of a target 3D CG object at the correct depth. Finally, we validate the advantage provided by our technique by comparing it with conventional stereoscopic PM using a user study on a depth-matching task.", "title": "Multifocal Stereoscopic Projection Mapping", "normalizedTitle": "Multifocal Stereoscopic Projection Mapping", "fno": "09523847", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Lenses", "Optical Focusing", "Optical Projectors", "Optical Scanners", "Stereo Image Processing", "Three Dimensional Displays", "Visual Perception", "Technical Issues", "CG", "Physical Surfaces", "Focus Modulation", "ET Ls", "Virtual Image", "Focal Length Modulation", "Correct Depth", "Multifocal Stereoscopic Projection", "Stereoscopic Projection Mapping", "Three Dimensional Computer Generated", "Arbitrary Shapes", "Projected Imagery", "Current Stereoscopic PM Technology", "Binocular Cues", "Correct Focus Cues", "Vergence Accommodation Conflict", "VAC", "Multifocal Approach", "Primary Technical Contribution", "Focus Tunable Lenses", "Active Shutter Glasses", "Periodical Focal Sweeps", "Optical Term", "Sweep Period", "High Speed Projector", "Stereo Image Processing", 
"Three Dimensional Displays", "Optical Imaging", "Adaptive Optics", "Glass", "Lenses", "Observers", "Stereoscopic Projection Mapping", "Multifocal Display", "Vergence Accommodation Conflict" ], "authors": [ { "givenName": "Sorashi", "surname": "Kimura", "fullName": "Sorashi Kimura", "affiliation": "Graduate School of Engineering Science, Osaka University, Osaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Daisuke", "surname": "Iwai", "fullName": "Daisuke Iwai", "affiliation": "Graduate School of Engineering Science, Osaka University, Osaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Parinya", "surname": "Punpongsanon", "fullName": "Parinya Punpongsanon", "affiliation": "Graduate School of Engineering Science, Osaka University, Osaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Kosuke", "surname": "Sato", "fullName": "Kosuke Sato", "affiliation": "Graduate School of Engineering Science, Osaka University, Osaka, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4256-4266", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2011/0063/0/06130263", "title": "Optimum alignment of panoramic images for stereoscopic navigation in image-based telepresence systems", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130263/12OmNAtst7B", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733b295", "title": "The Stereoscopic Zoom", "doi": null, "abstractUrl": 
"/proceedings-article/cvprw/2017/0733b295/12OmNrkT7ID", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836486", "title": "Modeling Physical Structure as Additional Constraints for Stereoscopic Optical See-Through Head-Mounted Display Calibration", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836486/12OmNx7XH8d", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2015/6026/1/07163164", "title": "3D interaction design: Increasing the stimulus-response correspondence by using stereoscopic vision", "doi": null, "abstractUrl": "/proceedings-article/fg/2015/07163164/12OmNxR5UKi", "parentPublication": { "id": "proceedings/fg/2015/6026/5", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/c5/2012/1009/0/06195218", "title": "Using the Phantogram Technique for a Collaborative Stereoscopic Multitouch Tabletop Game", "doi": null, "abstractUrl": "/proceedings-article/c5/2012/06195218/12OmNxRnvSa", "parentPublication": { "id": "proceedings/c5/2012/1009/0", "title": "Tenth International Conference on Creating, Connecting and Collaborating through Computing (C5 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2013/2840/0/2840a073", "title": "Joint Subspace Stabilization for Stereoscopic Video", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840a073/12OmNxT56Af", "parentPublication": { "id": 
"proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2013/3211/0/3211a378", "title": "A Stereoscopic CG System with Motion Parallax and Its Digital Contents for Science Museums", "doi": null, "abstractUrl": "/proceedings-article/sitis/2013/3211a378/12OmNxYL5gS", "parentPublication": { "id": "proceedings/sitis/2013/3211/0", "title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2012/1611/0/06238901", "title": "Keystone correction for stereoscopic cinematography", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06238901/12OmNylboM1", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446222", "title": "A Method of View-Dependent Stereoscopic Projection on Curved Screen", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446222/13bd1gCd7Sx", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800e583", "title": "Separating Particulate Matter From a Single Microscopic Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800e583/1m3nGy9Tflm", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { 
"previous": { "fno": "09523840", "articleId": "1wpqvrW88O4", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523832", "articleId": "1wpqjiNuSqY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2Fp5OP9tK", "name": "ttg202111-09523847s1-supp1-3106486.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523847s1-supp1-3106486.mp4", "extension": "mp4", "size": "16.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y2FkV9ZFKM", "title": "Nov.", "year": "2021", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1wpqjiNuSqY", "doi": "10.1109/TVCG.2021.3106432", "abstract": "We present a new approach for redirected walking in static and dynamic scenes that uses techniques from robot motion planning to compute the redirection gains that steer the user on collision-free paths in the physical space. Our first contribution is a mathematical framework for redirected walking using concepts from motion planning and configuration spaces. This framework highlights various geometric and perceptual constraints that tend to make collision-free redirected walking difficult. We use our framework to propose an efficient solution to the redirection problem that uses the notion of visibility polygons to compute the free spaces in the physical environment and the virtual environment. The visibility polygon provides a concise representation of the entire space that is visible, and therefore walkable, to the user from their position within an environment. Using this representation of walkable space, we apply redirected walking to steer the user to regions of the visibility polygon in the physical environment that closely match the region that the user occupies in the visibility polygon in the virtual environment. We show that our algorithm is able to steer the user along paths that result in significantly fewer resets than existing state-of-the-art algorithms in both static and dynamic scenes. 
Our project website is available at <uri>https://ganuna.umd.edu/vis.poly/</uri>.", "abstracts": [ { "abstractType": "Regular", "content": "We present a new approach for redirected walking in static and dynamic scenes that uses techniques from robot motion planning to compute the redirection gains that steer the user on collision-free paths in the physical space. Our first contribution is a mathematical framework for redirected walking using concepts from motion planning and configuration spaces. This framework highlights various geometric and perceptual constraints that tend to make collision-free redirected walking difficult. We use our framework to propose an efficient solution to the redirection problem that uses the notion of visibility polygons to compute the free spaces in the physical environment and the virtual environment. The visibility polygon provides a concise representation of the entire space that is visible, and therefore walkable, to the user from their position within an environment. Using this representation of walkable space, we apply redirected walking to steer the user to regions of the visibility polygon in the physical environment that closely match the region that the user occupies in the visibility polygon in the virtual environment. We show that our algorithm is able to steer the user along paths that result in significantly fewer resets than existing state-of-the-art algorithms in both static and dynamic scenes. Our project website is available at <uri>https://ganuna.umd.edu/vis.poly/</uri>.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a new approach for redirected walking in static and dynamic scenes that uses techniques from robot motion planning to compute the redirection gains that steer the user on collision-free paths in the physical space. Our first contribution is a mathematical framework for redirected walking using concepts from motion planning and configuration spaces. 
This framework highlights various geometric and perceptual constraints that tend to make collision-free redirected walking difficult. We use our framework to propose an efficient solution to the redirection problem that uses the notion of visibility polygons to compute the free spaces in the physical environment and the virtual environment. The visibility polygon provides a concise representation of the entire space that is visible, and therefore walkable, to the user from their position within an environment. Using this representation of walkable space, we apply redirected walking to steer the user to regions of the visibility polygon in the physical environment that closely match the region that the user occupies in the visibility polygon in the virtual environment. We show that our algorithm is able to steer the user along paths that result in significantly fewer resets than existing state-of-the-art algorithms in both static and dynamic scenes. Our project website is available at https://ganuna.umd.edu/vis.poly/.", "title": "Redirected Walking in Static and Dynamic Scenes Using Visibility Polygons", "normalizedTitle": "Redirected Walking in Static and Dynamic Scenes Using Visibility Polygons", "fno": "09523832", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Collision Avoidance", "Computational Geometry", "Control Engineering Computing", "Mathematics Computing", "Mobile Robots", "Virtual Reality", "Visibility Polygon", "Static Scenes", "Dynamic Scenes", "Redirection Gains", "Collision Free Paths", "Physical Space", "Mathematical Framework", "Motion Planning", "Configuration Spaces", "Framework Highlights", "Perceptual Constraints", "Redirection Problem", "Free Spaces", "Physical Environment", "Virtual Environment", "Entire Space", "Walkable Space", "Collision Free Redirected Walking", "Robot Motion Planning", "Legged Locomotion", "Virtual Environments", "Heuristic Algorithms", "Aerospace Electronics", "Planning", "Space Vehicles", "Robots", "Redirected 
Walking", "Locomotion", "Alignment", "Visibility Polygon", "Isovist", "Motion Planning" ], "authors": [ { "givenName": "Niall L.", "surname": "Williams", "fullName": "Niall L. Williams", "affiliation": "University of Maryland, College Park, US", "__typename": "ArticleAuthorType" }, { "givenName": "Aniket", "surname": "Bera", "fullName": "Aniket Bera", "affiliation": "University of Maryland, College Park, US", "__typename": "ArticleAuthorType" }, { "givenName": "Dinesh", "surname": "Manocha", "fullName": "Dinesh Manocha", "affiliation": "University of Maryland, College Park, US", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "4267-4277", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pive/2012/1218/0/06229795", "title": "Adaptive redirected walking in a virtual world", "doi": null, "abstractUrl": "/proceedings-article/pive/2012/06229795/12OmNzUxOk4", "parentPublication": { "id": "proceedings/pive/2012/1218/0", "title": "2012 IEEE VR Workshop on Perceptual Illusions in Virtual Environments", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446579", "title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446263", "title": "Mobius Walker: Pitch and Roll Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446263/13bd1gJ1v07", 
"parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/04/ttg201404579", "title": "Performance of Redirected Walking Algorithms in a Constrained Virtual World", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg201404579/13rRUwjoNx4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/04/ttg2013040634", "title": "Comparing Four Approaches to Generalized Redirected Walking: Simulation and Live User Data", "doi": null, "abstractUrl": "/journal/tg/2013/04/ttg2013040634/13rRUx0Pqpx", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/04/07036075", "title": "Cognitive Resource Demands of Redirected Walking", "doi": null, "abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09733261", "title": "One-step out-of-place resetting for redirected walking in VR", "doi": null, "abstractUrl": "/journal/tg/5555/01/09733261/1BENJyPkx5S", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a053", "title": "Redirected Walking Based on Historical User Walking Data", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju", "parentPublication": { "id": 
"proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798121", "title": "Real-time Optimal Planning for Redirected Walking Using Deep Q-Learning", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798121/1cJ17Y60ruM", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090595", "title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09523847", "articleId": "1wpqmNfLX9e", "__typename": "AdjacentArticleType" }, "next": { "fno": "09523890", "articleId": "1wpqBpgOKUE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y2Fl4EHJwA", "name": "ttg202111-09523832s1-supp1-3106432.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523832s1-supp1-3106432.pdf", "extension": "pdf", "size": "132 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }