data
dict
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1jrU0RsEpnG", "doi": "10.1109/TVCG.2020.2973745", "abstract": "Presents the introductory editorial for this issue of the publication.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory editorial for this issue of the publication.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory editorial for this issue of the publication.", "title": "Editor's Note", "normalizedTitle": "Editor's Note", "fno": "09082801", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Klaus", "surname": "Mueller", "fullName": "Klaus Mueller", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2135-2141", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "mags/an/2022/03/09875139", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2022/03/09875139/1GlbXTIEwaQ", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2022/04/09972860", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2022/04/09972860/1ISVNzFCZu8", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2020/02/08956009", "title": 
"Editor's Note", "doi": null, "abstractUrl": "/journal/td/2020/02/08956009/1gtJY06WATe", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2020/01/09031986", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2020/01/09031986/1i6VhktGnkc", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2020/02/09103673", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2020/02/09103673/1keqEV28ioE", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2021/04/09257115", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/td/2021/04/09257115/1oFCKncAhqM", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2020/04/09263260", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2020/04/09263260/1oReM0ot75m", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2021/10/09408530", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/td/2021/10/09408530/1sVEVpV9zNK", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2021/03/09546090", "title": "From the Editor's Desk", "doi": null, "abstractUrl": "/magazine/an/2021/03/09546090/1x6zEFuXbH2", 
"parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/12/09586410", "title": "Editor's Note", "doi": null, "abstractUrl": "/journal/tg/2021/12/09586410/1y11sTji3vO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "09082802", "articleId": "1jrTVLo1tpC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1jrTVLo1tpC", "doi": "10.1109/TVCG.2020.2974638", "abstract": "The five papers in this special section were from the 2020 IEEE Pacific Visualization Symposium (IEEE PacificVis), which was scheduled to be hosted by Tianjin University and held in Tianjin, China, from April 14 to 17, 2020.", "abstracts": [ { "abstractType": "Regular", "content": "The five papers in this special section were from the 2020 IEEE Pacific Visualization Symposium (IEEE PacificVis), which was scheduled to be hosted by Tianjin University and held in Tianjin, China, from April 14 to 17, 2020.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The five papers in this special section were from the 2020 IEEE Pacific Visualization Symposium (IEEE PacificVis), which was scheduled to be hosted by Tianjin University and held in Tianjin, China, from April 14 to 17, 2020.", "title": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2020", "normalizedTitle": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2020", "fno": "09082802", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Special Issues And Sections", "Meetings", "Visualization", "Computer Graphics" ], "authors": [ { "givenName": "Fabian", "surname": "Beck", "fullName": "Fabian Beck", "affiliation": "Paluno - The Ruhr Institute for Software Technology, University of Duisburg-Essen, Duisburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Jinwook", "surname": "Seo", "fullName": "Jinwook Seo", "affiliation": "Department of Computer Science and Engineering, Seoul National University, Seoul, Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Chaoli", "surname": "Wang", "fullName": 
"Chaoli Wang", "affiliation": "Department of Computer Science and Engineering, University of Notre Dame, Notre Dame, IN, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2142-2143", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2013/06/ttg2013060898", "title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium 2012", "doi": null, "abstractUrl": "/journal/tg/2013/06/ttg2013060898/13rRUNvgziD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/08/07138667", "title": "Guest Editors’ Introduction: Special Section on the IEEE Pacific Visualization Symposium 2014", "doi": null, "abstractUrl": "/journal/tg/2015/08/07138667/13rRUwI5Ugf", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/08/06847259", "title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium", "doi": null, "abstractUrl": "/journal/tg/2014/08/06847259/13rRUxD9gXJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/06/08352605", "title": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2018", "doi": null, "abstractUrl": "/journal/tg/2018/06/08352605/13rRUxlgxOp", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer 
Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/06/08703194", "title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2019", "doi": null, "abstractUrl": "/journal/tg/2019/06/08703194/19Er7j5Ad7a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2022/01/09702708", "title": "Guest Editors’ Introduction to the Special Section on Bioinformatics Research and Applications", "doi": null, "abstractUrl": "/journal/tb/2022/01/09702708/1AH375DQaGY", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/06/09766260", "title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2022", "doi": null, "abstractUrl": "/journal/tg/2022/06/09766260/1D34QjpFGyQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2022/03/09788108", "title": "Editorial", "doi": null, "abstractUrl": "/journal/tb/2022/03/09788108/1DU9k5pRa4o", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/07/09108341", "title": "Guest Editors' Introduction to the Special Section on Computational Photography", "doi": null, "abstractUrl": "/journal/tp/2020/07/09108341/1koL3gQqTHa", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2021/06/09430173", "title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2021", "doi": null, "abstractUrl": "/journal/tg/2021/06/09430173/1tzuiF6azcs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09082801", "articleId": "1jrU0RsEpnG", "__typename": "AdjacentArticleType" }, "next": { "fno": "08978585", "articleId": "1haUx0fpghW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1haUx0fpghW", "doi": "10.1109/TVCG.2020.2970509", "abstract": "When point clouds are labeled in information visualization applications, sophisticated guidelines as in cartography do not yet exist. Existing naive strategies may mislead as to which points belong to which label. To inform improved strategies, we studied factors influencing this phenomenon. We derived a class of labeled point cloud representations from existing applications and we defined different models predicting how humans interpret such complex representations, focusing on their geometric properties. We conducted an empirical study, in which participants had to relate dots to labels in order to evaluate how well our models predict. Our results indicate that presence of point clusters, label size, and angle to the label have an effect on participants' judgment as well as that the distance measure types considered perform differently discouraging the use of label centers as reference points.", "abstracts": [ { "abstractType": "Regular", "content": "When point clouds are labeled in information visualization applications, sophisticated guidelines as in cartography do not yet exist. Existing naive strategies may mislead as to which points belong to which label. To inform improved strategies, we studied factors influencing this phenomenon. We derived a class of labeled point cloud representations from existing applications and we defined different models predicting how humans interpret such complex representations, focusing on their geometric properties. We conducted an empirical study, in which participants had to relate dots to labels in order to evaluate how well our models predict. 
Our results indicate that presence of point clusters, label size, and angle to the label have an effect on participants' judgment as well as that the distance measure types considered perform differently discouraging the use of label centers as reference points.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "When point clouds are labeled in information visualization applications, sophisticated guidelines as in cartography do not yet exist. Existing naive strategies may mislead as to which points belong to which label. To inform improved strategies, we studied factors influencing this phenomenon. We derived a class of labeled point cloud representations from existing applications and we defined different models predicting how humans interpret such complex representations, focusing on their geometric properties. We conducted an empirical study, in which participants had to relate dots to labels in order to evaluate how well our models predict. Our results indicate that presence of point clusters, label size, and angle to the label have an effect on participants' judgment as well as that the distance measure types considered perform differently discouraging the use of label centers as reference points.", "title": "Modeling How Humans Judge Dot-Label Relations in Point Cloud Visualizations", "normalizedTitle": "Modeling How Humans Judge Dot-Label Relations in Point Cloud Visualizations", "fno": "08978585", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cartography", "Data Visualisation", "Distance Measurement", "Point Cloud Visualizations", "Information Visualization Applications", "Labeled Point Cloud Representations", "Complex Representations", "Point Clusters", "Label Size", "Label Centers", "Dot Label Relations", "Visualization", "Three Dimensional Displays", "Labeling", "Task Analysis", "Predictive Models", "Urban Areas", "Lenses", "Human Judgment Model", "Document Visualization", "Label Placement" ], "authors": [ { "givenName": "Martin", 
"surname": "Reckziegel", "fullName": "Martin Reckziegel", "affiliation": "Leipzig University", "__typename": "ArticleAuthorType" }, { "givenName": "Linda", "surname": "Pfeiffer", "fullName": "Linda Pfeiffer", "affiliation": "German Aerospace Center DLR", "__typename": "ArticleAuthorType" }, { "givenName": "Christian", "surname": "Heine", "fullName": "Christian Heine", "affiliation": "Leipzig University", "__typename": "ArticleAuthorType" }, { "givenName": "Stefan", "surname": "Jänicke", "fullName": "Stefan Jänicke", "affiliation": "University of Southern Denmark", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2144-2155", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdmw/2017/3800/0/3800a850", "title": "Combining Active Learning and Semi-Supervised Learning by Using Selective Label Spreading", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2017/3800a850/12OmNvFHfGd", "parentPublication": { "id": "proceedings/icdmw/2017/3800/0", "title": "2017 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/06/ttg2008061237", "title": "Particle-based labeling: Fast point-feature labeling without obscuring other visual features", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061237/13rRUwbaqUM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539393", "title": "An Evaluation of Visual Search Support in Maps", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539393/13rRUwjGoLK", "parentPublication": 
{ "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500b743", "title": "IDD: A Dataset for Exploring Problems of Autonomous Navigation in Unconstrained Environments", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500b743/18j8NGRjKve", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2021/0337/0/033700a943", "title": "Unsupervised Labeling and Extraction of Phrase-based Concepts in Vulnerability Descriptions", "doi": null, "abstractUrl": "/proceedings-article/ase/2021/033700a943/1AjTfGOSCwU", "parentPublication": { "id": "proceedings/ase/2021/0337/0", "title": "2021 36th IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09904455", "title": "Multiple Forecast Visualizations (MFVs): Trade-offs in Trust and Performance in Multiple COVID-19 Forecast Visualizations", "doi": null, "abstractUrl": "/journal/tg/2023/01/09904455/1H1gjlaBqVO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09917516", "title": "Geo-Storylines: Integrating Maps into Storyline Visualizations", "doi": null, "abstractUrl": "/journal/tg/2023/01/09917516/1HrexIf2zZe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ai/5555/01/10113715", "title": "Noisy Label Detection and Counterfactual 
Correction", "doi": null, "abstractUrl": "/journal/ai/5555/01/10113715/1MNbV9nYrXq", "parentPublication": { "id": "trans/ai", "title": "IEEE Transactions on Artificial Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08809750", "title": "Pattern-Driven Navigation in 2D Multiscale Visualizations with Scalable Insets", "doi": null, "abstractUrl": "/journal/tg/2020/01/08809750/1cHEu5CRoFq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09573413", "title": "Adaptive Graph Guided Disambiguation for Partial Label Learning", "doi": null, "abstractUrl": "/journal/tp/2022/12/09573413/1xH5E3Yjgek", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09082802", "articleId": "1jrTVLo1tpC", "__typename": "AdjacentArticleType" }, "next": { "fno": "09035636", "articleId": "1iaeBQ4H756", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iaeBQ4H756", "doi": "10.1109/TVCG.2020.2970522", "abstract": "We propose a photographic method to show scalar values of high dynamic range (HDR) by color mapping for 2D visualization. We combine (1) tone-mapping operators that transform the data to the display range of the monitor while preserving perceptually important features, based on a systematic evaluation, and (2) simulated glares that highlight high-value regions. Simulated glares are effective for highlighting small areas (of a few pixels) that may not be visible with conventional visualizations; through a controlled perception study, we confirm that glare is preattentive. The usefulness of our overall photographic HDR visualization is validated through the feedback of expert users.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a photographic method to show scalar values of high dynamic range (HDR) by color mapping for 2D visualization. We combine (1) tone-mapping operators that transform the data to the display range of the monitor while preserving perceptually important features, based on a systematic evaluation, and (2) simulated glares that highlight high-value regions. Simulated glares are effective for highlighting small areas (of a few pixels) that may not be visible with conventional visualizations; through a controlled perception study, we confirm that glare is preattentive. The usefulness of our overall photographic HDR visualization is validated through the feedback of expert users.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a photographic method to show scalar values of high dynamic range (HDR) by color mapping for 2D visualization. 
We combine (1) tone-mapping operators that transform the data to the display range of the monitor while preserving perceptually important features, based on a systematic evaluation, and (2) simulated glares that highlight high-value regions. Simulated glares are effective for highlighting small areas (of a few pixels) that may not be visible with conventional visualizations; through a controlled perception study, we confirm that glare is preattentive. The usefulness of our overall photographic HDR visualization is validated through the feedback of expert users.", "title": "Photographic High-Dynamic-Range Scalar Visualization", "normalizedTitle": "Photographic High-Dynamic-Range Scalar Visualization", "fno": "09035636", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Image Colour Analysis", "Color Mapping", "Simulated Glares", "Photographic HDR Visualization", "Photographic High Dynamic Range Scalar Visualization", "2 D Visualization", "Tone Mapping Operators", "Data Visualization", "Image Color Analysis", "Pipelines", "Dynamic Range", "Visualization", "Two Dimensional Displays", "Monitoring", "Tone Mapping", "Glare", "High Dynamic Range Visualization", "2 D Diagrams" ], "authors": [ { "givenName": "Liang", "surname": "Zhou", "fullName": "Liang Zhou", "affiliation": "SCI Institute, University of Utah, Salt Lake City, UT, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Marc", "surname": "Rivinius", "fullName": "Marc Rivinius", "affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Chris R.", "surname": "Johnson", "fullName": "Chris R. 
Johnson", "affiliation": "SCI Institute, University of Utah, Salt Lake City, UT, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Weiskopf", "fullName": "Daniel Weiskopf", "affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2156-2167", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/aipr/2014/5921/0/07041912", "title": "High dynamic range (HDR) video processing for the exploitation of high bit-depth sensors in human-monitored surveillance", "doi": null, "abstractUrl": "/proceedings-article/aipr/2014/07041912/12OmNA14Aip", "parentPublication": { "id": "proceedings/aipr/2014/5921/0", "title": "2014 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csie/2009/3507/6/3507f583", "title": "Color Vision Based High Dynamic Range Images Rendering", "doi": null, "abstractUrl": "/proceedings-article/csie/2009/3507f583/12OmNCctfaE", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2009/4534/0/05559003", "title": "Artifact-free High Dynamic Range imaging", "doi": null, "abstractUrl": "/proceedings-article/iccp/2009/05559003/12OmNCuDzub", "parentPublication": { "id": "proceedings/iccp/2009/4534/0", "title": "IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2016/1853/0/07786173", "title": "High Dynamic Range Video Coding with Backward 
Compatibility", "doi": null, "abstractUrl": "/proceedings-article/dcc/2016/07786173/12OmNxcMSkC", "parentPublication": { "id": "proceedings/dcc/2016/1853/0", "title": "2016 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2016/0806/0/07550796", "title": "High dynamic range image composition using a linear interpolation approach", "doi": null, "abstractUrl": "/proceedings-article/icis/2016/07550796/12OmNxw5Bpw", "parentPublication": { "id": "proceedings/icis/2016/0806/0", "title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2004/2177/0/21770269", "title": "Two-Channel Technique for High Dynamic Range Image Visualization", "doi": null, "abstractUrl": "/proceedings-article/iv/2004/21770269/12OmNyo1nMX", "parentPublication": { "id": "proceedings/iv/2004/2177/0", "title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. 
IV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900a546", "title": "Multi-Bracket High Dynamic Range Imaging with Event Cameras", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900a546/1G56FK3UGPe", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900b031", "title": "Gamma-enhanced Spatial Attention Network for Efficient High Dynamic Range Imaging", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900b031/1G56nGzWShG", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h706", "title": "Neural Auto-Exposure for High-Dynamic Range Object Detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h706/1yeJuGu5Xvq", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900g293", "title": "End-to-end High Dynamic Range Camera Pipeline Optimization", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900g293/1yeK6nSzK1y", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08978585", "articleId": "1haUx0fpghW", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "08977320", "articleId": "1h2AIkwYg4E", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1h2AIkwYg4E", "doi": "10.1109/TVCG.2020.2970512", "abstract": "Interaction plays a vital role during visual network exploration as users need to engage with both elements in the view (e.g., nodes, links) and interface controls (e.g., sliders, dropdown menus). Particularly as the size and complexity of a network grow, interactive displays supporting multimodal input (e.g., touch, speech, pen, gaze) exhibit the potential to facilitate fluid interaction during visual network exploration and analysis. While multimodal interaction with network visualization seems like a promising idea, many open questions remain. For instance, do users actually prefer multimodal input over unimodal input, and if so, why? Does it enable them to interact more naturally, or does having multiple modes of input confuse users? To answer such questions, we conducted a qualitative user study in the context of a network visualization tool, comparing speech- and touch-based unimodal interfaces to a multimodal interface combining the two. Our results confirm that participants strongly prefer multimodal input over unimodal input attributing their preference to: 1) the freedom of expression, 2) the complementary nature of speech and touch, and 3) integrated interactions afforded by the combination of the two modalities. 
We also describe the interaction patterns participants employed to perform common network visualization operations and highlight themes for future multimodal network visualization systems to consider.", "abstracts": [ { "abstractType": "Regular", "content": "Interaction plays a vital role during visual network exploration as users need to engage with both elements in the view (e.g., nodes, links) and interface controls (e.g., sliders, dropdown menus). Particularly as the size and complexity of a network grow, interactive displays supporting multimodal input (e.g., touch, speech, pen, gaze) exhibit the potential to facilitate fluid interaction during visual network exploration and analysis. While multimodal interaction with network visualization seems like a promising idea, many open questions remain. For instance, do users actually prefer multimodal input over unimodal input, and if so, why? Does it enable them to interact more naturally, or does having multiple modes of input confuse users? To answer such questions, we conducted a qualitative user study in the context of a network visualization tool, comparing speech- and touch-based unimodal interfaces to a multimodal interface combining the two. Our results confirm that participants strongly prefer multimodal input over unimodal input attributing their preference to: 1) the freedom of expression, 2) the complementary nature of speech and touch, and 3) integrated interactions afforded by the combination of the two modalities. We also describe the interaction patterns participants employed to perform common network visualization operations and highlight themes for future multimodal network visualization systems to consider.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Interaction plays a vital role during visual network exploration as users need to engage with both elements in the view (e.g., nodes, links) and interface controls (e.g., sliders, dropdown menus). 
Particularly as the size and complexity of a network grow, interactive displays supporting multimodal input (e.g., touch, speech, pen, gaze) exhibit the potential to facilitate fluid interaction during visual network exploration and analysis. While multimodal interaction with network visualization seems like a promising idea, many open questions remain. For instance, do users actually prefer multimodal input over unimodal input, and if so, why? Does it enable them to interact more naturally, or does having multiple modes of input confuse users? To answer such questions, we conducted a qualitative user study in the context of a network visualization tool, comparing speech- and touch-based unimodal interfaces to a multimodal interface combining the two. Our results confirm that participants strongly prefer multimodal input over unimodal input attributing their preference to: 1) the freedom of expression, 2) the complementary nature of speech and touch, and 3) integrated interactions afforded by the combination of the two modalities. We also describe the interaction patterns participants employed to perform common network visualization operations and highlight themes for future multimodal network visualization systems to consider.", "title": "Touch? Speech? or Touch and Speech? Investigating Multimodal Interaction for Visual Network Exploration and Analysis", "normalizedTitle": "Touch? Speech? or Touch and Speech? 
Investigating Multimodal Interaction for Visual Network Exploration and Analysis", "fno": "08977320", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Graphical User Interfaces", "Mobile Handsets", "Visual Network Exploration", "Interactive Displays", "Multimodal Input", "Network Visualization Tool", "Multimodal Interface", "Interaction Patterns Participants", "Multimodal Network Visualization Systems", "Network Visualization Operations", "Visualization", "Encoding", "Tools", "Data Visualization", "Speech Recognition", "Natural Languages", "Task Analysis", "Multimodal Interaction", "Network Visualizations", "Natural Language Interfaces" ], "authors": [ { "givenName": "Ayshwarya", "surname": "Saktheeswaran", "fullName": "Ayshwarya Saktheeswaran", "affiliation": "Georgia Institute of Technology, Atlanta, GA", "__typename": "ArticleAuthorType" }, { "givenName": "Arjun", "surname": "Srinivasan", "fullName": "Arjun Srinivasan", "affiliation": "Georgia Institute of Technology, Atlanta, GA", "__typename": "ArticleAuthorType" }, { "givenName": "John", "surname": "Stasko", "fullName": "John Stasko", "affiliation": "Georgia Institute of Technology, Atlanta, GA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2168-2179", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cscs/2017/1839/0/07968566", "title": "Multimodal Interface for Ambient Assisted Living", "doi": null, "abstractUrl": "/proceedings-article/cscs/2017/07968566/12OmNARRYpY", "parentPublication": { "id": "proceedings/cscs/2017/1839/0", "title": "2017 21st International Conference on Control Systems and Computer Science (CSCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/haptics/2003/1890/0/18900151", "title": "Relative Performance Using Haptic and/or Touch-Produced Auditory Cues in a Remote Absolute Texture Identification Task", "doi": null, "abstractUrl": "/proceedings-article/haptics/2003/18900151/12OmNzDehah", "parentPublication": { "id": "proceedings/haptics/2003/1890/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08019860", "title": "Orko: Facilitating Multimodal Interaction for Visual Exploration and Analysis of Networks", "doi": null, "abstractUrl": "/journal/tg/2018/01/08019860/13rRUx0gefo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900e578", "title": "Improving Multimodal Speech Recognition by Data Augmentation and Speech Representations", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900e578/1G561ezEc9O", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09894081", "title": "Putting Vision and Touch Into Conflict: Results from a Multimodal Mixed Reality Setup", "doi": null, "abstractUrl": "/journal/tg/5555/01/09894081/1GIqtQDhf8I", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmbs/2022/6770/0/677000a199", "title": "Leveraging Clinical BERT in Multimodal Mortality Prediction Models for COVID-19", "doi": null, "abstractUrl": 
"/proceedings-article/cmbs/2022/677000a199/1GhW8bBO4iQ", "parentPublication": { "id": "proceedings/cmbs/2022/6770/0", "title": "2022 IEEE 35th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/08/09023002", "title": "Interweaving Multimodal Interaction With Flexible Unit Visualizations for Data Exploration", "doi": null, "abstractUrl": "/journal/tg/2021/08/09023002/1hTHRTEQgRG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093414", "title": "Exploring Hate Speech Detection in Multimodal Publications", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093414/1jPbxi0Vk40", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800n3286", "title": "MMTM: Multimodal Transfer Module for CNN Fusion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800n3286/1m3ojQrj4iY", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a714", "title": "MIVA: Multimodal Interactions for Facilitating Visual Analysis with Multiple Coordinated Views", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a714/1rSR8lx5snS", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], 
"adjacentArticles": { "previous": { "fno": "09035636", "articleId": "1iaeBQ4H756", "__typename": "AdjacentArticleType" }, "next": { "fno": "08977505", "articleId": "1h2AIHeB46A", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1h2AIHeB46A", "doi": "10.1109/TVCG.2020.2970523", "abstract": "Graph drawing readability metrics are routinely used to assess and create node-link layouts of network data. Existing readability metrics fall short in three ways. The many count-based metrics such as edge-edge or node-edge crossings simply provide integer counts, missing the opportunity to quantify the amount of overlap between items, which may vary in size, at a more fine-grained level. Current metrics focus solely on single-level topological structure, ignoring the possibility of multi-level structure such as large and thus highly salient metanodes. Most current metrics focus on the measurement of clutter in the form of crossings and overlaps, and do not take into account the trade-off between the clutter and the information sparsity of the drawing, which we refer to as sprawl. We propose an area-aware approach to clutter metrics that tracks the extent of geometric overlaps between node-node, node-edge, and edge-edge pairs in detail. It handles variable-size nodes and explicitly treats metanodes and leaf nodes uniformly. We call the combination of a sprawl metric and an area-aware clutter metric a sprawlter metric. We present an instantiation of the sprawlter metrics featuring a formal and thorough discussion of the crucial component, the penalty mapping function. 
We implement and validate our proposed metrics with extensive computational analysis of graph layouts, considering four layout algorithms and 56 layouts encompassing both real-world data and synthetic examples illustrating specific configurations of interest.", "abstracts": [ { "abstractType": "Regular", "content": "Graph drawing readability metrics are routinely used to assess and create node-link layouts of network data. Existing readability metrics fall short in three ways. The many count-based metrics such as edge-edge or node-edge crossings simply provide integer counts, missing the opportunity to quantify the amount of overlap between items, which may vary in size, at a more fine-grained level. Current metrics focus solely on single-level topological structure, ignoring the possibility of multi-level structure such as large and thus highly salient metanodes. Most current metrics focus on the measurement of clutter in the form of crossings and overlaps, and do not take into account the trade-off between the clutter and the information sparsity of the drawing, which we refer to as sprawl. We propose an area-aware approach to clutter metrics that tracks the extent of geometric overlaps between node-node, node-edge, and edge-edge pairs in detail. It handles variable-size nodes and explicitly treats metanodes and leaf nodes uniformly. We call the combination of a sprawl metric and an area-aware clutter metric a sprawlter metric. We present an instantiation of the sprawlter metrics featuring a formal and thorough discussion of the crucial component, the penalty mapping function. 
We implement and validate our proposed metrics with extensive computational analysis of graph layouts, considering four layout algorithms and 56 layouts encompassing both real-world data and synthetic examples illustrating specific configurations of interest.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Graph drawing readability metrics are routinely used to assess and create node-link layouts of network data. Existing readability metrics fall short in three ways. The many count-based metrics such as edge-edge or node-edge crossings simply provide integer counts, missing the opportunity to quantify the amount of overlap between items, which may vary in size, at a more fine-grained level. Current metrics focus solely on single-level topological structure, ignoring the possibility of multi-level structure such as large and thus highly salient metanodes. Most current metrics focus on the measurement of clutter in the form of crossings and overlaps, and do not take into account the trade-off between the clutter and the information sparsity of the drawing, which we refer to as sprawl. We propose an area-aware approach to clutter metrics that tracks the extent of geometric overlaps between node-node, node-edge, and edge-edge pairs in detail. It handles variable-size nodes and explicitly treats metanodes and leaf nodes uniformly. We call the combination of a sprawl metric and an area-aware clutter metric a sprawlter metric. We present an instantiation of the sprawlter metrics featuring a formal and thorough discussion of the crucial component, the penalty mapping function. 
We implement and validate our proposed metrics with extensive computational analysis of graph layouts, considering four layout algorithms and 56 layouts encompassing both real-world data and synthetic examples illustrating specific configurations of interest.", "title": "The Sprawlter Graph Readability Metric: Combining Sprawl and Area-Aware Clutter", "normalizedTitle": "The Sprawlter Graph Readability Metric: Combining Sprawl and Area-Aware Clutter", "fno": "08977505", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Graph Theory", "Telecommunication Network Topology", "Node Link Layouts", "Count Based Metrics", "Node Edge Crossings", "Integer Counts", "Fine Grained Level", "Current Metrics Focus", "Single Level Topological Structure", "Multilevel Structure", "Clutter Metrics", "Geometric Overlaps", "Edge Edge Pairs", "Variable Size Nodes", "Leaf Nodes", "Sprawl Metric", "Sprawlter Metrics", "Graph Layouts", "Sprawlter Graph Readability Metric", "Graph Drawing Readability Metrics", "Area Aware Clutter Metric", "Salient Metanodes", "Measurement", "Layout", "Clutter", "Readability Metrics", "Compounds", "Visualization", "Periodic Structures", "Graph Drawing", "Graph Drawing Metrics", "Readability Metrics", "Aesthetic Criteria" ], "authors": [ { "givenName": "Zipeng", "surname": "Liu", "fullName": "Zipeng Liu", "affiliation": "University of British Columbia", "__typename": "ArticleAuthorType" }, { "givenName": "Takayuki", "surname": "Itoh", "fullName": "Takayuki Itoh", "affiliation": "Ochanomizu University", "__typename": "ArticleAuthorType" }, { "givenName": "Jessica Q.", "surname": "Dawson", "fullName": "Jessica Q. 
Dawson", "affiliation": "University of British Columbia", "__typename": "ArticleAuthorType" }, { "givenName": "Tamara", "surname": "Munzner", "fullName": "Tamara Munzner", "affiliation": "University of British Columbia", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2180-2191", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2015/6879/0/07156354", "title": "Attribute-driven edge bundling for general graphs with applications in trail analysis", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156354/12OmNCaLEnG", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csit/2016/8914/0/07549476", "title": "Application domain and programming language readability yardsticks", "doi": null, "abstractUrl": "/proceedings-article/csit/2016/07549476/12OmNCfjev8", "parentPublication": { "id": "proceedings/csit/2016/8914/0", "title": "2016 7th International Conference on Computer Science and Information Technology (CSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2011/935/0/05742389", "title": "Multilevel agglomerative edge bundling for visualizing large graphs", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2011/05742389/12OmNxj233Y", "parentPublication": { "id": "proceedings/pacificvis/2011/935/0", "title": "2011 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2012/4771/0/4771a093", "title": "Clutter 
Reduction in Multi-dimensional Visualization of Incomplete Data Using Sugiyama Algorithm", "doi": null, "abstractUrl": "/proceedings-article/iv/2012/4771a093/12OmNzBOhHa", "parentPublication": { "id": "proceedings/iv/2012/4771/0", "title": "2012 16th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2010/04/tts2010040546", "title": "Learning a Metric for Code Readability", "doi": null, "abstractUrl": "/journal/ts/2010/04/tts2010040546/13rRUygT7gV", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192724", "title": "AmbiguityVis: Visualization of Ambiguity in Graph Layouts", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192724/13rRUyuegpa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpc/2022/9298/0/929800a214", "title": "An Empirical Investigation on the Trade-off between Smart Contract Readability and Gas Consumption", "doi": null, "abstractUrl": "/proceedings-article/icpc/2022/929800a214/1EpKH3lfMRO", "parentPublication": { "id": "proceedings/icpc/2022/9298/0", "title": "2022 IEEE/ACM 30th International Conference on Program Comprehension (ICPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnisc/2022/5351/0/535100a155", "title": "HED-CNN based Ionospheric Clutter Extraction for HF Range-Doppler Spectrum", "doi": null, "abstractUrl": "/proceedings-article/icnisc/2022/535100a155/1KYtoZqU3de", "parentPublication": { "id": "proceedings/icnisc/2022/5351/0", "title": "2022 8th Annual International Conference on Network and Information Systems for Computers (ICNISC)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2019/04/08739137", "title": "Evaluating the Readability of Force Directed Graph Layouts: A Deep Learning Approach", "doi": null, "abstractUrl": "/magazine/cg/2019/04/08739137/1aXM6mNkouI", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08977320", "articleId": "1h2AIkwYg4E", "__typename": "AdjacentArticleType" }, "next": { "fno": "08977377", "articleId": "1h2AJ4jdnFK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1h2AJ4jdnFK", "doi": "10.1109/TVCG.2020.2970525", "abstract": "The development of usable visualization solutions is essential for ensuring both their adoption and effectiveness. User-centered design principles, which involve users throughout the entire development process, have been shown to be effective in numerous information visualization endeavors. We describe how we applied these principles in scientific visualization over a two year collaboration to develop a hybrid in situ/post hoc solution tailored towards combustion researcher needs. Furthermore, we examine the importance of user-centered design and lessons learned over the design process in an effort to aid others seeking to develop effective scientific visualization solutions.", "abstracts": [ { "abstractType": "Regular", "content": "The development of usable visualization solutions is essential for ensuring both their adoption and effectiveness. User-centered design principles, which involve users throughout the entire development process, have been shown to be effective in numerous information visualization endeavors. We describe how we applied these principles in scientific visualization over a two year collaboration to develop a hybrid in situ/post hoc solution tailored towards combustion researcher needs. Furthermore, we examine the importance of user-centered design and lessons learned over the design process in an effort to aid others seeking to develop effective scientific visualization solutions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The development of usable visualization solutions is essential for ensuring both their adoption and effectiveness. 
User-centered design principles, which involve users throughout the entire development process, have been shown to be effective in numerous information visualization endeavors. We describe how we applied these principles in scientific visualization over a two year collaboration to develop a hybrid in situ/post hoc solution tailored towards combustion researcher needs. Furthermore, we examine the importance of user-centered design and lessons learned over the design process in an effort to aid others seeking to develop effective scientific visualization solutions.", "title": "A User-Centered Design Study in Scientific Visualization Targeting Domain Experts", "normalizedTitle": "A User-Centered Design Study in Scientific Visualization Targeting Domain Experts", "fno": "08977377", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "User Centred Design", "User Centered Design Principles", "Information Visualization", "User Centered Design Study", "Domain Experts", "Visualization Solution", "Data Visualization", "User Centered Design", "Visualization", "Usability", "Task Analysis", "Collaboration", "Combustion", "Italic Xmlns Ali Http Www Niso Org Schemas Ali 1 0 Xmlns Mml Http Www W 3 Org 1998 Math Math ML Xmlns Xlink Http Www W 3 Org 1999 Xlink Xmlns Xsi Http Www W 3 Org 2001 XML Schema Instance In Situ Italic Data Visualization", "Usability Studies", "Design Studies", "Qualitative Evaluation", "User Interfaces" ], "authors": [ { "givenName": "Yucong", "surname": "Ye", "fullName": "Yucong Ye", "affiliation": "Department of Computer Science, University of California, Davis", "__typename": "ArticleAuthorType" }, { "givenName": "Franz", "surname": "Sauer", "fullName": "Franz Sauer", "affiliation": "Department of Computer Science, University of California, Davis", "__typename": "ArticleAuthorType" }, { "givenName": "Kwan-Liu", "surname": "Ma", "fullName": "Kwan-Liu Ma", "affiliation": "Department of Computer Science, University of California, Davis", 
"__typename": "ArticleAuthorType" }, { "givenName": "Konduri", "surname": "Aditya", "fullName": "Konduri Aditya", "affiliation": "Combustion Research FacilitySandia National Laboratories", "__typename": "ArticleAuthorType" }, { "givenName": "Jacqueline", "surname": "Chen", "fullName": "Jacqueline Chen", "affiliation": "Combustion Research FacilitySandia National Laboratories", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2192-2203", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/1999/0093/0/00930096", "title": "User-Centered Design and Evaluation of a Real-Time Battlefield Visualization Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/vr/1999/00930096/12OmNA2cYEt", "parentPublication": { "id": "proceedings/vr/1999/0093/0", "title": "Proceedings of Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vizsec/2015/7599/0/07312771", "title": "Unlocking user-centered design methods for building cyber security visualizations", "doi": null, "abstractUrl": "/proceedings-article/vizsec/2015/07312771/12OmNAWH9Ev", "parentPublication": { "id": "proceedings/vizsec/2015/7599/0", "title": "2015 IEEE Symposium on Visualization for Cyber Security (VizSec)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/srii/2012/4770/0/4770a697", "title": "User Centered Design of Innovative E-Service Solutions - A Scientific Approach to User Fascination", "doi": null, "abstractUrl": "/proceedings-article/srii/2012/4770a697/12OmNCvLY08", "parentPublication": { "id": "proceedings/srii/2012/4770/0", "title": "Annual SRII Global Conference", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vizsec/2005/9477/0/01532062", "title": "A user-centered look at glyph-based security visualization", "doi": null, "abstractUrl": "/proceedings-article/vizsec/2005/01532062/12OmNxR5UPi", "parentPublication": { "id": "proceedings/vizsec/2005/9477/0", "title": "IEEE Workshop on Visualization for Computer Security 2005 (VizSEC 05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2011/0868/0/06004027", "title": "Developing and Applying a User-Centered Model for the Design and Implementation of Information Visualization Tools", "doi": null, "abstractUrl": "/proceedings-article/iv/2011/06004027/12OmNyQph8m", "parentPublication": { "id": "proceedings/iv/2011/0868/0", "title": "2011 15th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/e-science/2014/4288/1/06972276", "title": "Experiences with User-Centered Design for the Tigres Workflow API", "doi": null, "abstractUrl": "/proceedings-article/e-science/2014/06972276/12OmNzzP5Hq", "parentPublication": { "id": "proceedings/e-science/2014/4288/1", "title": "2014 IEEE 10th International Conference on e-Science (e-Science)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017610", "title": "Activity-Centered Domain Characterization for Problem-Driven Scientific Visualization", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017610/13rRUwhHcQX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/2009/01/mso2009010096", "title": "Usability and User-Centered Design in Scientific Software Development", "doi": null, "abstractUrl": "/magazine/so/2009/01/mso2009010096/13rRUwvT9eM", 
"parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a336", "title": "User-Centered Design and Evaluation of ARTTS: an Augmented Reality Triage Tool Suite for Mass Casualty Incidents", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a336/1JrR3eLmZX2", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/works/2022/5191/0/519100a019", "title": "A Domain-Specific Composition Environment for Provenance Query of Scientific Workflows", "doi": null, "abstractUrl": "/proceedings-article/works/2022/519100a019/1KckqxKZTUY", "parentPublication": { "id": "proceedings/works/2022/5191/0", "title": "2022 IEEE/ACM Workshop on Workflows in Support of Large-Scale Science (WORKS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08977505", "articleId": "1h2AIHeB46A", "__typename": "AdjacentArticleType" }, "next": { "fno": "08567954", "articleId": "17D45XDIXXS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45XDIXXS", "doi": "10.1109/TVCG.2018.2885750", "abstract": "An importance measure of 3D objects inspired by human perception has a range of applications since people want computers to behave like humans in many tasks. This paper revisits a well-defined measure, distinction of 3D surface mesh, which indicates how important a region of a mesh is with respect to classification. We develop a method to compute it based on a classification network and a Markov Random Field (MRF). The classification network learns view-based distinction by handling multiple views of a 3D object. Using a classification network has an advantage of avoiding the training data problem which has become a major obstacle of applying deep learning to 3D object understanding tasks. The MRF estimates the parameters of a linear model for combining the view-based distinction maps. The experiments using several publicly accessible datasets show that the distinctive regions detected by our method are not just significantly different from those detected by methods based on handcrafted features, but more consistent with human perception. We also compare it with other perceptual measures and quantitatively evaluate its performance in the context of two applications. Furthermore, due to the view-based nature of our method, we are able to easily extend mesh distinction to 3D scenes containing multiple objects.", "abstracts": [ { "abstractType": "Regular", "content": "An importance measure of 3D objects inspired by human perception has a range of applications since people want computers to behave like humans in many tasks. 
This paper revisits a well-defined measure, distinction of 3D surface mesh, which indicates how important a region of a mesh is with respect to classification. We develop a method to compute it based on a classification network and a Markov Random Field (MRF). The classification network learns view-based distinction by handling multiple views of a 3D object. Using a classification network has an advantage of avoiding the training data problem which has become a major obstacle of applying deep learning to 3D object understanding tasks. The MRF estimates the parameters of a linear model for combining the view-based distinction maps. The experiments using several publicly accessible datasets show that the distinctive regions detected by our method are not just significantly different from those detected by methods based on handcrafted features, but more consistent with human perception. We also compare it with other perceptual measures and quantitatively evaluate its performance in the context of two applications. Furthermore, due to the view-based nature of our method, we are able to easily extend mesh distinction to 3D scenes containing multiple objects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An importance measure of 3D objects inspired by human perception has a range of applications since people want computers to behave like humans in many tasks. This paper revisits a well-defined measure, distinction of 3D surface mesh, which indicates how important a region of a mesh is with respect to classification. We develop a method to compute it based on a classification network and a Markov Random Field (MRF). The classification network learns view-based distinction by handling multiple views of a 3D object. Using a classification network has an advantage of avoiding the training data problem which has become a major obstacle of applying deep learning to 3D object understanding tasks. 
The MRF estimates the parameters of a linear model for combining the view-based distinction maps. The experiments using several publicly accessible datasets show that the distinctive regions detected by our method are not just significantly different from those detected by methods based on handcrafted features, but more consistent with human perception. We also compare it with other perceptual measures and quantitatively evaluate its performance in the context of two applications. Furthermore, due to the view-based nature of our method, we are able to easily extend mesh distinction to 3D scenes containing multiple objects.", "title": "Distinction of 3D Objects and Scenes via Classification Network and Markov Random Field", "normalizedTitle": "Distinction of 3D Objects and Scenes via Classification Network and Markov Random Field", "fno": "08567954", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Feature Extraction", "Image Classification", "Image Representation", "Learning Artificial Intelligence", "Markov Processes", "Neural Nets", "Object Detection", "Object Recognition", "Multiple Objects", "Mesh Distinction", "View Based Nature", "Distinctive Regions", "View Based Distinction Maps", "3 D Object Understanding Tasks", "Markov Random Field", "Classification Network", "3 D Surface Mesh", "Human Perception", "Three Dimensional Displays", "Task Analysis", "Shape", "Two Dimensional Displays", "Feature Extraction", "Training", "Markov Random Fields", "3 D Mesh", "Distinction", "Neural Network", "Markov Random Field" ], "authors": [ { "givenName": "Ran", "surname": "Song", "fullName": "Ran Song", "affiliation": "Centre for Secure, Intelligent and Usable Systems, School of Computing, Engineering and Mathematics, University of Brighton, Brighton, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Yonghuai", "surname": "Liu", "fullName": "Yonghuai Liu", "affiliation": "Department of Computer Science, Edge Hill University, Ormskirk, United Kingdom", 
"__typename": "ArticleAuthorType" }, { "givenName": "Paul L.", "surname": "Rosin", "fullName": "Paul L. Rosin", "affiliation": "School of Computer Science and Informatics, Cardiff University, Cardiff, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2204-2218", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2014/5209/0/5209a930", "title": "Fusion of Image Segmentations under Markov, Random Fields", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a930/12OmNBUAvZ9", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/camp/1997/7987/0/79870220", "title": "Circuital Markov random fields for analog edge detection", "doi": null, "abstractUrl": "/proceedings-article/camp/1997/79870220/12OmNCdBDXt", "parentPublication": { "id": "proceedings/camp/1997/7987/0", "title": "Computer Architectures for Machine Perception, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fskd/2009/3735/5/3735e442", "title": "SAR Image Segmentation Based on Markov Random Field Model and Multiscale Technology", "doi": null, "abstractUrl": "/proceedings-article/fskd/2009/3735e442/12OmNxZkhti", "parentPublication": { "id": "proceedings/fskd/2009/3735/5", "title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2007/2822/1/04378735", "title": "Text/Non-text Ink Stroke Classification in 
Japanese Handwriting Based on Markov Random Fields", "doi": null, "abstractUrl": "/proceedings-article/icdar/2007/04378735/12OmNxbEtLz", "parentPublication": { "id": "proceedings/icdar/2007/2822/1", "title": "Ninth International Conference on Document Analysis and Recognition (ICDAR 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2002/1695/4/169540201", "title": "Face Detection and Synthesis Using Markov Random Field Models", "doi": null, "abstractUrl": "/proceedings-article/icpr/2002/169540201/12OmNyfdOX0", "parentPublication": { "id": "proceedings/icpr/2002/1695/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2004/8484/4/01326913", "title": "On iterative source-channel image decoding with Markov random field source models", "doi": null, "abstractUrl": "/proceedings-article/icassp/2004/01326913/12OmNzUgdgz", "parentPublication": { "id": "proceedings/icassp/2004/8484/4", "title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1994/6952/2/00413557", "title": "Segmentation of range and intensity images using multiscale Markov random field representations", "doi": null, "abstractUrl": "/proceedings-article/icip/1994/00413557/12OmNzcxZhC", "parentPublication": { "id": "proceedings/icip/1994/6952/2", "title": "Proceedings of 1st International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/cc/2019/03/07872428", "title": "A Markov Random Field Based Approach for Analyzing Supercomputer System Logs", "doi": null, "abstractUrl": "/journal/cc/2019/03/07872428/13rRUwh80Jx", "parentPublication": { "id": "trans/cc", "title": "IEEE Transactions on Cloud Computing", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2006/11/i1830", "title": "Dense Photometric Stereo: A Markov Random Field Approach", "doi": null, "abstractUrl": "/journal/tp/2006/11/i1830/13rRUygT7tT", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a662", "title": "A Data-Driven Prior on Facet Orientation for Semantic Mesh Labeling", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a662/17D45WgziON", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08977377", "articleId": "1h2AJ4jdnFK", "__typename": "AdjacentArticleType" }, "next": { "fno": "08546802", "articleId": "17D45WrVg2d", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WrVg2d", "doi": "10.1109/TVCG.2018.2883630", "abstract": "Tracking the temporal evolution of features in time-varying data is a key method in visualization. For typical feature definitions, such as vortices, objects are sparsely distributed over the data domain. In this paper, we present a novel approach for tracking both sparse and space-filling features. While the former comprise only a small fraction of the domain, the latter form a set of objects whose union covers the domain entirely while the individual objects are mutually disjunct. Our approach determines the assignment of features between two successive time-steps by solving two graph optimization problems. It first resolves one-to-one assignments of features by computing a maximum-weight, maximum-cardinality matching on a weighted bi-partite graph. Second, our algorithm detects events by creating a graph of potentially conflicting event explanations and finding a weighted, independent set in it. We demonstrate our method's effectiveness on synthetic and simulation data sets, the former of which enables quantitative evaluation because of the availability of ground-truth information. Here, our method performs on par or better than a well-established reference algorithm. In addition, manual visual inspection by our collaborators confirm the results' plausibility for simulation data.", "abstracts": [ { "abstractType": "Regular", "content": "Tracking the temporal evolution of features in time-varying data is a key method in visualization. For typical feature definitions, such as vortices, objects are sparsely distributed over the data domain. 
In this paper, we present a novel approach for tracking both sparse and space-filling features. While the former comprise only a small fraction of the domain, the latter form a set of objects whose union covers the domain entirely while the individual objects are mutually disjunct. Our approach determines the assignment of features between two successive time-steps by solving two graph optimization problems. It first resolves one-to-one assignments of features by computing a maximum-weight, maximum-cardinality matching on a weighted bi-partite graph. Second, our algorithm detects events by creating a graph of potentially conflicting event explanations and finding a weighted, independent set in it. We demonstrate our method's effectiveness on synthetic and simulation data sets, the former of which enables quantitative evaluation because of the availability of ground-truth information. Here, our method performs on par or better than a well-established reference algorithm. In addition, manual visual inspection by our collaborators confirm the results' plausibility for simulation data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Tracking the temporal evolution of features in time-varying data is a key method in visualization. For typical feature definitions, such as vortices, objects are sparsely distributed over the data domain. In this paper, we present a novel approach for tracking both sparse and space-filling features. While the former comprise only a small fraction of the domain, the latter form a set of objects whose union covers the domain entirely while the individual objects are mutually disjunct. Our approach determines the assignment of features between two successive time-steps by solving two graph optimization problems. It first resolves one-to-one assignments of features by computing a maximum-weight, maximum-cardinality matching on a weighted bi-partite graph. 
Second, our algorithm detects events by creating a graph of potentially conflicting event explanations and finding a weighted, independent set in it. We demonstrate our method's effectiveness on synthetic and simulation data sets, the former of which enables quantitative evaluation because of the availability of ground-truth information. Here, our method performs on par or better than a well-established reference algorithm. In addition, manual visual inspection by our collaborators confirm the results' plausibility for simulation data.", "title": "Feature Tracking by Two-Step Optimization", "normalizedTitle": "Feature Tracking by Two-Step Optimization", "fno": "08546802", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Complexity", "Data Visualisation", "Feature Extraction", "Graph Theory", "Optimisation", "Two Step Optimization", "Time Varying Data", "Feature Definition", "Data Domain", "Sparse Space Filling Features", "Graph Optimization Problems", "Maximum Weight Matching", "Maximum Cardinality Matching", "Weighted Bi Partite Graph", "Feature Tracking", "Feature Extraction", "Target Tracking", "Optimization", "Data Visualization", "Data Models", "Analytical Models", "Heuristic Algorithms", "Global Optimization", "Simulation Output Analysis", "Flow Visualization" ], "authors": [ { "givenName": "Andrea", "surname": "Schnorr", "fullName": "Andrea Schnorr", "affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Dirk", "surname": "N. Helmrich", "fullName": "Dirk N. 
Helmrich", "affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Dominik", "surname": "Denker", "fullName": "Dominik Denker", "affiliation": "Institute for Combustion Technology, RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Torsten W.", "surname": "Kuhlen", "fullName": "Torsten W. Kuhlen", "affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Bernd", "surname": "Hentschel", "fullName": "Bernd Hentschel", "affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2219-2233", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/visapp/2014/8133/3/07295096", "title": "Feature matching using CO-inertia analysis for people tracking", "doi": null, "abstractUrl": "/proceedings-article/visapp/2014/07295096/12OmNvsm6zh", "parentPublication": { "id": "proceedings/visapp/2014/8133/2", "title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109d607", "title": "Optimization of Target Objects for Natural Feature Tracking", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109d607/12OmNyv7m5x", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2015/9711/0/5720a751", "title": "Attributed Graphs for Tracking Multiple Objects in Structured Sports Videos", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a751/12OmNzVoBNd", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209d678", "title": "Unsupervised Tracking from Clustered Graph Patterns", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209d678/12OmNzwHvrO", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vspets/2005/9424/0/01570894", "title": "Object tracking with dynamic feature graph", "doi": null, "abstractUrl": "/proceedings-article/vspets/2005/01570894/12OmNzwpUhP", "parentPublication": { "id": "proceedings/vspets/2005/9424/0", "title": "Proceedings. 
2nd Joint IEEE International Workshop on Visual Surveillance and Performance Evaluation of Tracking and Surveillance (VS-PETS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a007", "title": "Learning Deep Appearance Feature for Multi-target Tracking", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a007/1ap5AZ64kLK", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200b774", "title": "Robust Deep Tracking with Two-step Augmentation Discriminative Correlation Filters", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b774/1cdOHHPL6V2", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2021/02/09311214", "title": "A Confidence-Guided Technique for Tracking Time-Varying Features", "doi": null, "abstractUrl": "/magazine/cs/2021/02/09311214/1pYWIN9JCTe", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413110", "title": "Robust Visual Object Tracking with Two-Stream Residual Convolutional Networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413110/1tmjzhcSj28", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieit/2021/2563/0/256300a244", "title": "Object tracking algorithm based on fusion of SiamFC 
and Feature Pyramid Network", "doi": null, "abstractUrl": "/proceedings-article/ieit/2021/256300a244/1wHKqvHr7mo", "parentPublication": { "id": "proceedings/ieit/2021/2563/0", "title": "2021 International Conference on Internet, Education and Information Technology (IEIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08567954", "articleId": "17D45XDIXXS", "__typename": "AdjacentArticleType" }, "next": { "fno": "08573859", "articleId": "17D45We0UEn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45We0UEn", "doi": "10.1109/TVCG.2018.2886322", "abstract": "This paper extends the recently proposed power-particle-based fluid simulation method with staggered discretization, GPU implementation, and adaptive sampling, largely enhancing the efficiency and usability of the method. In contrast to the original formulation which uses co-located pressures and velocities, in this paper, a staggered scheme is adapted to the Power Particles to benefit visual details and computing efficiency. Meanwhile, we propose a novel facet-based power diagrams construction algorithm suitable for parallelization and explore its GPU implementation, achieving an order of magnitude boost in performance over the existing code library. In addition, to utilize the potential of Power Particles to control individual cell volume, we apply adaptive particle sampling to improve the detail level with varying resolution. The proposed method can be entirely carried out on GPUs, and our extensive experiments validate our method both in terms of efficiency and visual quality.", "abstracts": [ { "abstractType": "Regular", "content": "This paper extends the recently proposed power-particle-based fluid simulation method with staggered discretization, GPU implementation, and adaptive sampling, largely enhancing the efficiency and usability of the method. In contrast to the original formulation which uses co-located pressures and velocities, in this paper, a staggered scheme is adapted to the Power Particles to benefit visual details and computing efficiency. 
Meanwhile, we propose a novel facet-based power diagrams construction algorithm suitable for parallelization and explore its GPU implementation, achieving an order of magnitude boost in performance over the existing code library. In addition, to utilize the potential of Power Particles to control individual cell volume, we apply adaptive particle sampling to improve the detail level with varying resolution. The proposed method can be entirely carried out on GPUs, and our extensive experiments validate our method both in terms of efficiency and visual quality.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper extends the recently proposed power-particle-based fluid simulation method with staggered discretization, GPU implementation, and adaptive sampling, largely enhancing the efficiency and usability of the method. In contrast to the original formulation which uses co-located pressures and velocities, in this paper, a staggered scheme is adapted to the Power Particles to benefit visual details and computing efficiency. Meanwhile, we propose a novel facet-based power diagrams construction algorithm suitable for parallelization and explore its GPU implementation, achieving an order of magnitude boost in performance over the existing code library. In addition, to utilize the potential of Power Particles to control individual cell volume, we apply adaptive particle sampling to improve the detail level with varying resolution. 
The proposed method can be entirely carried out on GPUs, and our extensive experiments validate our method both in terms of efficiency and visual quality.", "title": "Fluid Simulation with Adaptive Staggered Power Particles on GPUs", "normalizedTitle": "Fluid Simulation with Adaptive Staggered Power Particles on GPUs", "fno": "08573859", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Fluid Dynamics", "Computer Graphics", "Flow Simulation", "Graphics Processing Units", "Sampling Methods", "Adaptive Staggered Power Particles", "GPU", "Staggered Discretization", "Adaptive Particle Sampling", "Power Particle Based Fluid Simulation", "Facet Based Power Diagrams Construction Algorithm", "Visual Quality", "Visualization", "Adaptation Models", "Computational Modeling", "Graphics Processing Units", "Libraries", "Liquids", "Physically Based Modeling", "Fluid Simulation", "Power Diagrams", "GPU Parallelization", "Adaptive Sampling" ], "authors": [ { "givenName": "Xiao", "surname": "Zhai", "fullName": "Xiao Zhai", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Fei", "surname": "Hou", "fullName": "Fei Hou", "affiliation": "State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hong", "surname": "Qin", "fullName": "Hong Qin", "affiliation": "Department of Computer Science, Stony Brook University, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Aimin", "surname": "Hao", "fullName": "Aimin Hao", "affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": 
"trans", "pages": "2234-2246", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cw/2015/9403/0/9403a375", "title": "A Particle-Based Real-Time CG Rendering of Carbonated Water with Automatic Release of Bubbles", "doi": null, "abstractUrl": "/proceedings-article/cw/2015/9403a375/12OmNA14Ach", "parentPublication": { "id": "proceedings/cw/2015/9403/0", "title": "2015 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1992/2897/0/00235226", "title": "Rendering surface-particles", "doi": null, "abstractUrl": "/proceedings-article/visual/1992/00235226/12OmNz61dc1", "parentPublication": { "id": "proceedings/visual/1992/2897/0", "title": "Proceedings Visualization '92", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/e-science/2016/4273/0/07870923", "title": "A fast algorithm for neutrally-buoyant Lagrangian particles in numerical ocean modeling", "doi": null, "abstractUrl": "/proceedings-article/e-science/2016/07870923/12OmNzUxObB", "parentPublication": { "id": "proceedings/e-science/2016/4273/0", "title": "2016 IEEE 12th International Conference on e-Science (e-Science)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2014/5500/0/5500a054", "title": "24.77 Pflops on a Gravitational Tree-Code to Simulate the Milky Way Galaxy with 18600 GPUs", "doi": null, "abstractUrl": "/proceedings-article/sc/2014/5500a054/12OmNzb7Zu7", "parentPublication": { "id": "proceedings/sc/2014/5500/0", "title": "SC14: International Conference for High Performance Computing, Networking, Storage and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candar/2017/2087/0/2087a132", "title": "SPH-based Fluid Simulation on 
GPU Using Verlet List and Subdivided Cell-Linked List", "doi": null, "abstractUrl": "/proceedings-article/candar/2017/2087a132/12OmNzdoMHd", "parentPublication": { "id": "proceedings/candar/2017/2087/0", "title": "2017 Fifth International Symposium on Computing and Networking (CANDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icppw/2015/7589/0/7589a081", "title": "A Special Sorting Method for Neighbor Search Procedure in Smoothed Particle Hydrodynamics on GPUs", "doi": null, "abstractUrl": "/proceedings-article/icppw/2015/7589a081/12OmNzxPTGh", "parentPublication": { "id": "proceedings/icppw/2015/7589/0", "title": "2015 44th International Conference on Parallel Processing Workshops (ICPPW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/08/07243356", "title": "Fast Coherent Particle Advection through Time-Varying Unstructured Flow Datasets", "doi": null, "abstractUrl": "/journal/tg/2016/08/07243356/13rRUx0xPIN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2005/03/c3039", "title": "A Seamless Approach to Multiscale Complex Fluid Simulation", "doi": null, "abstractUrl": "/magazine/cs/2005/03/c3039/13rRUxbTMt3", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdiime/2022/9009/0/900900a048", "title": "Solid-Fluid Interaction Simulation System Based on SPH Unified Particle Framework", "doi": null, "abstractUrl": "/proceedings-article/icdiime/2022/900900a048/1Iz56eSpj3y", "parentPublication": { "id": "proceedings/icdiime/2022/9009/0", "title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candar/2020/8221/0/822100a195", "title": "Exploiting temporal parallelism in particle-based incompressive fluid simulation on FPGA", "doi": null, "abstractUrl": "/proceedings-article/candar/2020/822100a195/1sA9a0wFBIc", "parentPublication": { "id": "proceedings/candar/2020/8221/0", "title": "2020 Eighth International Symposium on Computing and Networking (CANDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08546802", "articleId": "17D45WrVg2d", "__typename": "AdjacentArticleType" }, "next": { "fno": "08565948", "articleId": "17D45Wda7ec", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1jx1mmLVkpW", "name": "ttg202006-08573859s1-tvcg_video.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08573859s1-tvcg_video.mp4", "extension": "mp4", "size": "47.7 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45Wda7ec", "doi": "10.1109/TVCG.2018.2884940", "abstract": "User interaction has the potential to greatly facilitate the exploration and understanding of 3D medical images for diagnosis and treatment. However, in certain specialized environments such as in an operating room (OR), technical and physical constraints such as the need to enforce strict sterility rules, make interaction challenging. In this paper, we propose to facilitate the intraoperative exploration of angiographic volumes by leveraging the motion of a tracked surgical pointer, a tool that is already manipulated by the surgeon when using a navigation system in the OR. We designed and implemented three interactive rendering techniques based on this principle. The benefit of each of these techniques is compared to its non-interactive counterpart in a psychophysics experiment where 20 medical imaging experts were asked to perform a reaching/targeting task while visualizing a 3D volume of angiographic data. The study showed a significant improvement of the appreciation of local vascular structure when using dynamic techniques, while not having a negative impact on the appreciation of the global structure and only a marginal impact on the execution speed. A qualitative evaluation of the different techniques showed a preference for dynamic chroma-depth in accordance with the objective metrics but a discrepancy between objective and subjective measures for dynamic aerial perspective and shading.", "abstracts": [ { "abstractType": "Regular", "content": "User interaction has the potential to greatly facilitate the exploration and understanding of 3D medical images for diagnosis and treatment. 
However, in certain specialized environments such as in an operating room (OR), technical and physical constraints such as the need to enforce strict sterility rules, make interaction challenging. In this paper, we propose to facilitate the intraoperative exploration of angiographic volumes by leveraging the motion of a tracked surgical pointer, a tool that is already manipulated by the surgeon when using a navigation system in the OR. We designed and implemented three interactive rendering techniques based on this principle. The benefit of each of these techniques is compared to its non-interactive counterpart in a psychophysics experiment where 20 medical imaging experts were asked to perform a reaching/targeting task while visualizing a 3D volume of angiographic data. The study showed a significant improvement of the appreciation of local vascular structure when using dynamic techniques, while not having a negative impact on the appreciation of the global structure and only a marginal impact on the execution speed. A qualitative evaluation of the different techniques showed a preference for dynamic chroma-depth in accordance with the objective metrics but a discrepancy between objective and subjective measures for dynamic aerial perspective and shading.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "User interaction has the potential to greatly facilitate the exploration and understanding of 3D medical images for diagnosis and treatment. However, in certain specialized environments such as in an operating room (OR), technical and physical constraints such as the need to enforce strict sterility rules, make interaction challenging. In this paper, we propose to facilitate the intraoperative exploration of angiographic volumes by leveraging the motion of a tracked surgical pointer, a tool that is already manipulated by the surgeon when using a navigation system in the OR. 
We designed and implemented three interactive rendering techniques based on this principle. The benefit of each of these techniques is compared to its non-interactive counterpart in a psychophysics experiment where 20 medical imaging experts were asked to perform a reaching/targeting task while visualizing a 3D volume of angiographic data. The study showed a significant improvement of the appreciation of local vascular structure when using dynamic techniques, while not having a negative impact on the appreciation of the global structure and only a marginal impact on the execution speed. A qualitative evaluation of the different techniques showed a preference for dynamic chroma-depth in accordance with the objective metrics but a discrepancy between objective and subjective measures for dynamic aerial perspective and shading.", "title": "Interaction Driven Enhancement of Depth Perception in Angiographic Volumes", "normalizedTitle": "Interaction Driven Enhancement of Depth Perception in Angiographic Volumes", "fno": "08565948", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Biomedical MRI", "Blood Vessels", "Data Visualisation", "Interactive Systems", "Medical Image Processing", "Rendering Computer Graphics", "Surgery", "Navigation System", "Interactive Rendering Techniques", "Noninteractive Counterpart", "Psychophysics Experiment", "20 Medical Imaging Experts", "Angiographic Data", "Appreciation", "Local Vascular Structure", "Dynamic Chroma Depth", "Interaction Driven Enhancement", "Depth Perception", "Angiographic Volumes", "User Interaction", "3 D Medical Images", "Specialized Environments", "Intraoperative Exploration", "Tracked Surgical Pointer", "Sterility Rules", "Surgery", "Rendering Computer Graphics", "Three Dimensional Displays", "Tracking", "Biomedical Imaging", "Tools", "Navigation", "Image Guided Surgery", "Volume Visualization", "Interaction Techniques", "Depth Cues", "Evaluation", "Angiography" ], "authors": [ { "givenName": "Simon", "surname": 
"Drouin", "fullName": "Simon Drouin", "affiliation": "Department of Biomedical Engineering, McConnell Brain Imaging Center, Montreal Neurological Institute, McGill University, Montreal, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel A. Di", "surname": "Giovanni", "fullName": "Daniel A. Di Giovanni", "affiliation": "Department of Biomedical Engineering, McConnell Brain Imaging Center, Montreal Neurological Institute, McGill University, Montreal, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Marta", "surname": "Kersten-Oertel", "fullName": "Marta Kersten-Oertel", "affiliation": "Department of Computer Science and Software Engineering, Concordia University, Montreal, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "D. Louis", "surname": "Collins", "fullName": "D. Louis Collins", "affiliation": "Department of Biomedical Engineering, McConnell Brain Imaging Center, Montreal Neurological Institute, McGill University, Montreal, Canada", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2247-2257", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cis/2009/3931/1/3931a160", "title": "Extracting the Coronary Artery in Angiographic Image Based on à Trous Wavelet of Rotary Gaussian with Adaptive Space Coefficient", "doi": null, "abstractUrl": "/proceedings-article/cis/2009/3931a160/12OmNAGNCaX", "parentPublication": { "id": "proceedings/cis/2009/3931/1", "title": "2009 International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/1996/7441/0/00507125", "title": "The Fourier adaptive smoothness constraint for computing optical flow on 
sequences of angiographic images", "doi": null, "abstractUrl": "/proceedings-article/cbms/1996/00507125/12OmNrHB1Vm", "parentPublication": { "id": "proceedings/cbms/1996/7441/0", "title": "Proceedings Ninth IEEE Symposium on Computer-Based Medical Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/1996/7441/0/00507121", "title": "A method for automatically detecting the systole and diastole phases in sequences of angiographic images", "doi": null, "abstractUrl": "/proceedings-article/cbms/1996/00507121/12OmNwFid1h", "parentPublication": { "id": "proceedings/cbms/1996/7441/0", "title": "Proceedings Ninth IEEE Symposium on Computer-Based Medical Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vbc/1990/2039/0/00109321", "title": "Coronary vasculature visualization from limited angiographic views", "doi": null, "abstractUrl": "/proceedings-article/vbc/1990/00109321/12OmNxy4N2N", "parentPublication": { "id": "proceedings/vbc/1990/2039/0", "title": "[1990] Proceedings of the First Conference on Visualization in Biomedical Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cic/1989/2114/0/00130584", "title": "Computation of functional angiographic images with the Hartley transform", "doi": null, "abstractUrl": "/proceedings-article/cic/1989/00130584/12OmNy2ah21", "parentPublication": { "id": "proceedings/cic/1989/2114/0", "title": "Proceedings Computers in Cardiology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bmei/2008/3118/1/3118a341", "title": "Perception-aware Depth Cueing for Illustrative Vascular Visualization", "doi": null, "abstractUrl": "/proceedings-article/bmei/2008/3118a341/12OmNzvhvKm", "parentPublication": { "id": "proceedings/bmei/2008/3118/1", "title": "2008 International Conference on Biomedical Engineering and 
Informatics (BMEI 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1117", "title": "Enhancing Depth Perception in Translucent Volumes", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1117/13rRUygT7y1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2018/5488/0/08621540", "title": "Inter/Intra-Constraints Optimization for Fast Vessel Enhancement in X-ray Angiographic Image Sequence", "doi": null, "abstractUrl": "/proceedings-article/bibm/2018/08621540/17D45X0yjUO", "parentPublication": { "id": "proceedings/bibm/2018/5488/0", "title": "2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccgiv/2022/9250/0/925000a111", "title": "Coronary Artery Segmentation from X-ray Angiographic Images using Width-aware U-Net", "doi": null, "abstractUrl": "/proceedings-article/iccgiv/2022/925000a111/1LxfpGyhNcY", "parentPublication": { "id": "proceedings/iccgiv/2022/9250/0", "title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acit-csii-bcd/2017/3302/0/3302a190", "title": "Depth Recognition in 3D Translucent Stereoscopic Imaging of Medical Volumes by Means of a Glasses-Free 3D Display", "doi": null, "abstractUrl": "/proceedings-article/acit-csii-bcd/2017/3302a190/1cdOB3HCeTm", "parentPublication": { "id": "proceedings/acit-csii-bcd/2017/3302/0", "title": "2017 5th Intl Conf on Applied Computing and Information Technology/4th Intl Conf on Computational Science/Intelligence and Applied Informatics/2nd Intl Conf on Big Data, Cloud Computing, Data Science (ACIT-CSII-BCD)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08573859", "articleId": "17D45We0UEn", "__typename": "AdjacentArticleType" }, "next": { "fno": "08576679", "articleId": "17D45XreC6e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1js2Esj96ak", "name": "ttg202006-08565948s1-interactive_rendering_video.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08565948s1-interactive_rendering_video.mp4", "extension": "mp4", "size": "28.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45XreC6e", "doi": "10.1109/TVCG.2018.2886877", "abstract": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model.", "abstracts": [ { "abstractType": "Regular", "content": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. 
Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. 
Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model.", "title": "Perceptually Validated Cross-Renderer Analytical BRDF Parameter Remapping", "normalizedTitle": "Perceptually Validated Cross-Renderer Analytical BRDF Parameter Remapping", "fno": "08576679", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Image Processing", "Rendering Computer Graphics", "Solid Modelling", "Visually Faithful Remapping", "Remapped Renderings", "Software Packages", "Cross Renderer Analytical BRDF Parameter Remapping", "BRDF Remapping Technique", "BRDF Difference Probe", "Digital 3 D Prototyping", "Rendering Computer Graphics", "Computational Modeling", "Lighting", "Measurement", "Probes", "Visualization", "Optimization", "BRDF", "SVBRDF", "Perceptual Validation", "Virtual Materials", "Surface Perception", "Parameter Remapping" ], "authors": [ { "givenName": "Dar'ya", "surname": "Guarnera", "fullName": "Dar'ya Guarnera", "affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway", "__typename": "ArticleAuthorType" }, { "givenName": "Giuseppe Claudio", "surname": "Guarnera", "fullName": "Giuseppe Claudio Guarnera", "affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway", "__typename": "ArticleAuthorType" }, { "givenName": "Matteo", "surname": "Toscani", "fullName": "Matteo Toscani", "affiliation": "Psychology, Justus-Liebig-Universität Giessen, Giessen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Mashhuda", "surname": "Glencross", "fullName": "Mashhuda Glencross", "affiliation": "Pismo Software Ltd., Oxford, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Baihua", "surname": "Li", "fullName": "Baihua Li", "affiliation": "Computer Science, Loughborough University, 
Loughborough, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Jon Yngve", "surname": "Hardeberg", "fullName": "Jon Yngve Hardeberg", "affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway", "__typename": "ArticleAuthorType" }, { "givenName": "Karl R.", "surname": "Gegenfurtner", "fullName": "Karl R. Gegenfurtner", "affiliation": "Psychology, Justus-Liebig-Universität Giessen, Giessen, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2258-2272", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/wmsvm/2010/7077/0/05558360", "title": "Modeling and Editing Isotropic BRDF", "doi": null, "abstractUrl": "/proceedings-article/wmsvm/2010/05558360/12OmNARiM3T", "parentPublication": { "id": "proceedings/wmsvm/2010/7077/0", "title": "2010 Second International Conference on Modeling, Simulation and Visualization Methods (WMSVM 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209c047", "title": "Effective Acquisition of Dense Anisotropic BRDF", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209c047/12OmNqNXEsZ", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nswctc/2010/4011/1/4011a332", "title": "The Analysis of Global Illumination Rendering Based on BRDF", "doi": null, "abstractUrl": "/proceedings-article/nswctc/2010/4011a332/12OmNyvGynS", "parentPublication": { "id": "proceedings/nswctc/2010/4011/1", "title": "Networks Security, Wireless 
Communications and Trusted Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d559", "title": "A Gaussian Process Latent Variable Model for BRDF Inference", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d559/12OmNzVoBvI", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/11/ttg2012111824", "title": "Rational BRDF", "doi": null, "abstractUrl": "/journal/tg/2012/11/ttg2012111824/13rRUwjGoFZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09678000", "title": "Real-Time Lighting Estimation for Augmented Reality via Differentiable Screen-Space Rendering", "doi": null, "abstractUrl": "/journal/tg/2023/04/09678000/1A4SuYWCI7K", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f416", "title": "Neural Voxel Renderer: Learning an Accurate and Controllable Rendering Tool", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f416/1m3nYbnokEM", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/04/09203787", "title": "Learning-Based Inverse Bi-Scale Material Fitting From Tabular BRDFs", "doi": null, "abstractUrl": "/journal/tg/2022/04/09203787/1nkyY8W8j1m", "parentPublication": { "id": "trans/tg", "title": "IEEE 
Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09444888", "title": "Estimating Homogeneous Data-Driven BRDF Parameters From a Reflectance Map Under Known Natural Lighting", "doi": null, "abstractUrl": "/journal/tg/2022/12/09444888/1u51y8PQCMU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09623493", "title": "Invertible Neural BRDF for Object Inverse Rendering", "doi": null, "abstractUrl": "/journal/tp/2022/12/09623493/1yJT7tLzbi0", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08565948", "articleId": "17D45Wda7ec", "__typename": "AdjacentArticleType" }, "next": { "fno": "08554159", "articleId": "17D45WB0qbp", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1js2FLKT2a4", "name": "ttg202006-08576679s1-supplemental_material.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08576679s1-supplemental_material.pdf", "extension": "pdf", "size": "54.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WB0qbp", "doi": "10.1109/TVCG.2018.2884468", "abstract": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change.", "abstracts": [ { "abstractType": "Regular", "content": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. 
Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. 
We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change.", "title": "Scene Transitions and Teleportation in Virtual Reality and the Implications for Spatial Awareness and Sickness", "normalizedTitle": "Scene Transitions and Teleportation in Virtual Reality and the Implications for Spatial Awareness and Sickness", "fno": "08554159", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Animation", "Virtual Reality", "Automated Viewpoint Changes", "Head Tracked Virtual Reality", "Immersive Virtual Reality", "Scene Transitions", "Rotational Changes", "Translational Changes", "Viewpoint Transitions", "Rotational Viewpoint Changes", "Animated Technique", "Rotational Transitions", "Scene Changes", "Instant Teleportations", "Virtual Scene", "Spatial Awareness", "Visual Adjustments", "Pulsed Interpolation", "Animated Interpolation", "Teleportation", "Transition Techniques", "Interactive User Control", "Teleportation", "Three Dimensional Displays", "Legged Locomotion", "Tracking", "Space Exploration", "Motion Pictures", "Virtual Reality", "Animation", "Virtual Reality", "View Transitions", "Scene Transitions", "Travel", "Immersive Cinema", "3 D Movies", "Teleportation", "Navigation", "Sickness", "Spatial Orientation", "Spatial Awareness" ], "authors": [ { "givenName": "Kasra", "surname": "Moghadam", 
"fullName": "Kasra Moghadam", "affiliation": "Texas A&M University, College Station, TX, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Colin", "surname": "Banigan", "fullName": "Colin Banigan", "affiliation": "Texas A&M University, College Station, TX, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Eric D.", "surname": "Ragan", "fullName": "Eric D. Ragan", "affiliation": "University of Florida, Gainesville, FL, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2273-2287", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/robot/1991/2163/0/00131936", "title": "Biped gait transitions", "doi": null, "abstractUrl": "/proceedings-article/robot/1991/00131936/12OmNAS9zt7", "parentPublication": { "id": "proceedings/robot/1991/2163/0", "title": "Proceedings. 
1991 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892316", "title": "An exploration of input conditions for virtual teleportation", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892316/12OmNCzb9vr", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev/2016/1269/0/07760005", "title": "Random forests based recognition of human activities and postural transitions on smartphone", "doi": null, "abstractUrl": "/proceedings-article/iciev/2016/07760005/12OmNwtEEP6", "parentPublication": { "id": "proceedings/iciev/2016/1269/0", "title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892386", "title": "Travel in large-scale head-worn VR: Pre-oriented teleportation with WIMs and previews", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892386/12OmNzhELm6", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/09/08031015", "title": "A Vector Field Design Approach to Animated Transitions", "doi": null, "abstractUrl": "/journal/tg/2018/09/08031015/13rRUB7a117", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998297", "title": "Teleporting through virtual environments: Effects of path scale and environment scale on spatial updating", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998297/1hrXhk9mu9W", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090560", "title": "Either Give Me a Reason to Stand or an Opportunity to Sit in VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090560/1jIxzjmEoeY", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a608", "title": "Walking and Teleportation in Wide-area Virtual Reality Experiences", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a608/1pysv8bIfrG", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/09/09332290", "title": "Quantifiable Fine-Grain Occlusion Removal Assistance for Efficient VR Exploration", "doi": null, "abstractUrl": "/journal/tg/2022/09/09332290/1qzsRxXpW4o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a480", "title": "Analysis of Positional Tracking Space Usage when using Teleportation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a480/1tnXfrT4ere", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08576679", "articleId": 
"17D45XreC6e", "__typename": "AdjacentArticleType" }, "next": { "fno": "08554186", "articleId": "17D45WIXbPb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1js2Mf0SAgw", "name": "ttg202006-08554159s1-transitions-examples.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08554159s1-transitions-examples.mp4", "extension": "mp4", "size": "5.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WIXbPb", "doi": "10.1109/TVCG.2018.2883628", "abstract": "We introduce dynamically warping grids for adaptive liquid simulation. Our primary contributions are a strategy for dynamically deforming regular grids over the course of a simulation and a method for efficiently utilizing these deforming grids for liquid simulation. Prior work has shown that unstructured grids are very effective for adaptive fluid simulations. However, unstructured grids often lead to complicated implementations and a poor cache hit rate due to inconsistent memory access. Regular grids, on the other hand, provide a fast, fixed memory access pattern and straightforward implementation. Our method combines the advantages of both: we leverage the simplicity of regular grids while still achieving practical and controllable spatial adaptivity. We demonstrate that our method enables adaptive simulations that are fast, flexible, and robust to null-space issues. At the same time, our method is simple to implement and takes advantage of existing highly-tuned algorithms.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce dynamically warping grids for adaptive liquid simulation. Our primary contributions are a strategy for dynamically deforming regular grids over the course of a simulation and a method for efficiently utilizing these deforming grids for liquid simulation. Prior work has shown that unstructured grids are very effective for adaptive fluid simulations. However, unstructured grids often lead to complicated implementations and a poor cache hit rate due to inconsistent memory access. 
Regular grids, on the other hand, provide a fast, fixed memory access pattern and straightforward implementation. Our method combines the advantages of both: we leverage the simplicity of regular grids while still achieving practical and controllable spatial adaptivity. We demonstrate that our method enables adaptive simulations that are fast, flexible, and robust to null-space issues. At the same time, our method is simple to implement and takes advantage of existing highly-tuned algorithms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce dynamically warping grids for adaptive liquid simulation. Our primary contributions are a strategy for dynamically deforming regular grids over the course of a simulation and a method for efficiently utilizing these deforming grids for liquid simulation. Prior work has shown that unstructured grids are very effective for adaptive fluid simulations. However, unstructured grids often lead to complicated implementations and a poor cache hit rate due to inconsistent memory access. Regular grids, on the other hand, provide a fast, fixed memory access pattern and straightforward implementation. Our method combines the advantages of both: we leverage the simplicity of regular grids while still achieving practical and controllable spatial adaptivity. We demonstrate that our method enables adaptive simulations that are fast, flexible, and robust to null-space issues. 
At the same time, our method is simple to implement and takes advantage of existing highly-tuned algorithms.", "title": "Simulating Liquids on Dynamically Warping Grids", "normalizedTitle": "Simulating Liquids on Dynamically Warping Grids", "fno": "08554186", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cache Storage", "Deformation", "Flow Simulation", "Mesh Generation", "Deforming Grids", "Adaptive Liquid Simulation", "Dynamically Warping Grids", "Controllable Spatial Adaptivity", "Fixed Memory Access Pattern", "Regular Grids", "Inconsistent Memory Access", "Adaptive Fluid Simulations", "Unstructured Grids", "Adaptation Models", "Strain", "Liquids", "Computational Modeling", "Streaming Media", "Computer Graphics", "Animation", "Computer Graphics", "Physics Based Animation", "Fluid Simulation", "Liquid", "Adaptivity", "Curvilinear Grids" ], "authors": [ { "givenName": "Hikaru", "surname": "Ibayashi", "fullName": "Hikaru Ibayashi", "affiliation": "Department of Computer Science, University of Southern California, Los Angels, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Chris", "surname": "Wojtan", "fullName": "Chris Wojtan", "affiliation": "Visual Computing Group, Institute of Science and Technology Austria, Klosterneuburg, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Nils", "surname": "Thuerey", "fullName": "Nils Thuerey", "affiliation": "Technische Universität München, München, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Takeo", "surname": "Igarashi", "fullName": "Takeo Igarashi", "affiliation": "Department of Computer Science, University of Tokyo, Hongo, Bunkyo-ku, Tokyo, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Ryoichi", "surname": "Ando", "fullName": "Ryoichi Ando", "affiliation": "National Institute of Informatics, Chiyoda-ku, Tokyo, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, 
"issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2288-2302", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ldav/2017/0617/0/08231854", "title": "Parallel multi-layer ghost cell generation for distributed unstructured grids", "doi": null, "abstractUrl": "/proceedings-article/ldav/2017/08231854/12OmNAKcNJN", "parentPublication": { "id": "proceedings/ldav/2017/0617/0", "title": "2017 IEEE 7th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1995/7187/0/71870248", "title": "Vector Plots for Irregular Grids", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1995/71870248/12OmNC2OSMK", "parentPublication": { "id": "proceedings/ieee-vis/1995/7187/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1994/6627/0/00346313", "title": "Visualizing flow over curvilinear grid surfaces using line integral convolution", "doi": null, "abstractUrl": "/proceedings-article/visual/1994/00346313/12OmNyYDDGc", "parentPublication": { "id": "proceedings/visual/1994/6627/0", "title": "Proceedings Visualization '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2011/4648/0/4648a148", "title": "Fluid Animation on Arbitrarily-Shaped Structured Grids", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2011/4648a148/12OmNzb7Zrb", "parentPublication": { "id": "proceedings/sbgames/2011/4648/0", "title": "2011 Brazilian Symposium on Games and Digital Entertainment", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/10/06747389", "title": "Large-Scale Liquid Simulation on Adaptive Hexahedral Grids", "doi": 
null, "abstractUrl": "/journal/tg/2014/10/06747389/13rRUxYrbMj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3801", "title": "Image Based Reconstruction of Liquids from 2D Surface Detections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3801/1H0LsB06x7q", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08554159", "articleId": "17D45WB0qbp", "__typename": "AdjacentArticleType" }, "next": { "fno": "08543848", "articleId": "17D45VsBU70", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45VsBU70", "doi": "10.1109/TVCG.2018.2883314", "abstract": "This work proposes a new stereo shading architecture that enables adaptive shading rates and automatic shading reuse among triangles and between two views. The proposed pipeline presents several novel features. First, the present sort-middle/bin shading is extended to tile pair-based shading to rasterize and shade pixels at two views simultaneously. A new rasterization algorithm utilizing epipolar geometry is then proposed to schedule tile pairs and perform rasterization at stereo views efficiently. Second, this work presents an adaptive multi-rate shading framework to compute shading on pixels at different rates. A novel tile-based screen space cache and a new cache reuse shader are proposed to perform such multi-rate shading across triangles and views. The results show that the newly proposed method outperforms the standard sort-middle shading and the state-of-the-art multi-rate shading by achieving considerably lower shading cost and memory bandwidth.", "abstracts": [ { "abstractType": "Regular", "content": "This work proposes a new stereo shading architecture that enables adaptive shading rates and automatic shading reuse among triangles and between two views. The proposed pipeline presents several novel features. First, the present sort-middle/bin shading is extended to tile pair-based shading to rasterize and shade pixels at two views simultaneously. A new rasterization algorithm utilizing epipolar geometry is then proposed to schedule tile pairs and perform rasterization at stereo views efficiently. 
Second, this work presents an adaptive multi-rate shading framework to compute shading on pixels at different rates. A novel tile-based screen space cache and a new cache reuse shader are proposed to perform such multi-rate shading across triangles and views. The results show that the newly proposed method outperforms the standard sort-middle shading and the state-of-the-art multi-rate shading by achieving considerably lower shading cost and memory bandwidth.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This work proposes a new stereo shading architecture that enables adaptive shading rates and automatic shading reuse among triangles and between two views. The proposed pipeline presents several novel features. First, the present sort-middle/bin shading is extended to tile pair-based shading to rasterize and shade pixels at two views simultaneously. A new rasterization algorithm utilizing epipolar geometry is then proposed to schedule tile pairs and perform rasterization at stereo views efficiently. Second, this work presents an adaptive multi-rate shading framework to compute shading on pixels at different rates. A novel tile-based screen space cache and a new cache reuse shader are proposed to perform such multi-rate shading across triangles and views. 
The results show that the newly proposed method outperforms the standard sort-middle shading and the state-of-the-art multi-rate shading by achieving considerably lower shading cost and memory bandwidth.", "title": "Tile Pair-Based Adaptive Multi-Rate Stereo Shading", "normalizedTitle": "Tile Pair-Based Adaptive Multi-Rate Stereo Shading", "fno": "08543848", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Geometry", "Pipeline Processing", "Rendering Computer Graphics", "Sorting", "Stereo Image Processing", "Shading Cost", "Tile Pair Based Adaptive Multirate Stereo Shading", "Memory Bandwidth", "Standard Sort Middle Shading", "Tile Based Screen Space Cache", "Stereo Views", "Tile Pairs", "Rasterization Algorithm", "Automatic Shading Reuse", "Adaptive Shading Rates", "Stereo Shading Architecture", "Rendering Computer Graphics", "Geometry", "Pipelines", "Bandwidth", "Computer Architecture", "Signal Resolution", "Stereo Rendering", "Tile Pair Based Rendering", "Multi Rate Shading" ], "authors": [ { "givenName": "Yazhen", "surname": "Yuan", "fullName": "Yazhen Yuan", "affiliation": "State key laboratory of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Rui", "surname": "Wang", "fullName": "Rui Wang", "affiliation": "State key laboratory of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hujun", "surname": "Bao", "fullName": "Hujun Bao", "affiliation": "State key laboratory of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2303-2314", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/hcs/2015/8885/0/07477462", "title": "The ARM® 
Mali-T880 Mobile GPU", "doi": null, "abstractUrl": "/proceedings-article/hcs/2015/07477462/12OmNAS9zPX", "parentPublication": { "id": "proceedings/hcs/2015/8885/0", "title": "2015 IEEE Hot Chips 27 Symposium (HCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/1996/7518/0/75180038", "title": "Improved Specular Highlights With Adaptive Shading", "doi": null, "abstractUrl": "/proceedings-article/cgi/1996/75180038/12OmNwBT1ig", "parentPublication": { "id": "proceedings/cgi/1996/7518/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmpcon/1992/2655/0/00186697", "title": "Scalable graphics enhancements for PA-RISC workstations", "doi": null, "abstractUrl": "/proceedings-article/cmpcon/1992/00186697/12OmNxGSm2u", "parentPublication": { "id": "proceedings/cmpcon/1992/2655/0", "title": "COMPCON Spring 1992", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c030", "title": "Registering Images to Untextured Geometry Using Average Shading Gradients", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c030/12OmNyLiuzk", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/searis/2014/9955/0/07152799", "title": "guacamole - An extensible scene graph and rendering framework based on deferred shading", "doi": null, "abstractUrl": "/proceedings-article/searis/2014/07152799/12OmNzA6GLj", "parentPublication": { "id": "proceedings/searis/2014/9955/0", "title": "2014 IEEE 7th Workshop on Software Engineering and Architectures for Realtime Interactive Systems (SEARIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2016/01/07194844", "title": "Anisotropic Ambient Volume Shading", "doi": null, "abstractUrl": "/journal/tg/2016/01/07194844/13rRUB7a1fT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/03/07452621", "title": "Shape Estimation from Shading, Defocus, and Correspondence Using Light-Field Angular Coherence", "doi": null, "abstractUrl": "/journal/tp/2017/03/07452621/13rRUxYIN5A", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/10/07501796", "title": "Aggregate G-Buffer Anti-Aliasing -Extended Version-", "doi": null, "abstractUrl": "/journal/tg/2016/10/07501796/13rRUyv53Fw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpca/2019/1444/0/144400a623", "title": "Rendering Elimination: Early Discard of Redundant Tiles in the Graphics Pipeline", "doi": null, "abstractUrl": "/proceedings-article/hpca/2019/144400a623/18M7PSwaQkE", "parentPublication": { "id": "proceedings/hpca/2019/1444/0", "title": "2019 IEEE International Symposium on High Performance Computer Architecture (HPCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600g177", "title": "Multi-View Mesh Reconstruction with Neural Deferred Shading", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600g177/1H0NScvhUC4", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
} ], "adjacentArticles": { "previous": { "fno": "08554186", "articleId": "17D45WIXbPb", "__typename": "AdjacentArticleType" }, "next": { "fno": "08580399", "articleId": "17D45VUZMU0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1js2HXZuFJS", "name": "ttg202006-08543848s1-supplemental_video.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08543848s1-supplemental_video.mp4", "extension": "mp4", "size": "96.1 MB", "__typename": "WebExtraType" }, { "id": "1js2MuR0GjK", "name": "ttg202006-08543848s1-supplemental_document.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08543848s1-supplemental_document.pdf", "extension": "pdf", "size": "52.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45VUZMU0", "doi": "10.1109/TVCG.2018.2887379", "abstract": "Virtual reality (VR) has enjoyed significant popularity in recent years. Where navigation has been a fundamental appeal of 3D applications for decades, facilitating this in VR has been quite a challenge. Over the past decades, various virtual locomotion techniques (VLTs) have been developed that aim to offer natural, usable and efficient ways of navigating VR without inducing VR sickness. Several studies of these techniques have been conducted in order to evaluate their performance in various study conditions and virtual contexts. Taxonomies have also been proposed to either place similar techniques in meaningful categories or decompose them to their underlying design components. In this survey, we aim to aggregate and understand the current state of the art of VR locomotion research and discuss the design implications of VLTs in terms of strengths, weaknesses and applicability.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual reality (VR) has enjoyed significant popularity in recent years. Where navigation has been a fundamental appeal of 3D applications for decades, facilitating this in VR has been quite a challenge. Over the past decades, various virtual locomotion techniques (VLTs) have been developed that aim to offer natural, usable and efficient ways of navigating VR without inducing VR sickness. Several studies of these techniques have been conducted in order to evaluate their performance in various study conditions and virtual contexts. 
Taxonomies have also been proposed to either place similar techniques in meaningful categories or decompose them to their underlying design components. In this survey, we aim to aggregate and understand the current state of the art of VR locomotion research and discuss the design implications of VLTs in terms of strengths, weaknesses and applicability.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual reality (VR) has enjoyed significant popularity in recent years. Where navigation has been a fundamental appeal of 3D applications for decades, facilitating this in VR has been quite a challenge. Over the past decades, various virtual locomotion techniques (VLTs) have been developed that aim to offer natural, usable and efficient ways of navigating VR without inducing VR sickness. Several studies of these techniques have been conducted in order to evaluate their performance in various study conditions and virtual contexts. Taxonomies have also been proposed to either place similar techniques in meaningful categories or decompose them to their underlying design components. 
In this survey, we aim to aggregate and understand the current state of the art of VR locomotion research and discuss the design implications of VLTs in terms of strengths, weaknesses and applicability.", "title": "Virtual Locomotion: A Survey", "normalizedTitle": "Virtual Locomotion: A Survey", "fno": "08580399", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Virtual Reality", "Navigation", "Virtual Locomotion Techniques", "VLT", "VR Sickness", "VR Locomotion", "Navigation", "Legged Locomotion", "Task Analysis", "Monitoring", "Visualization", "Three Dimensional Displays", "Space Exploration", "Virtual Reality", "Virtual Locomotion", "Virtual Navigation", "Survey", "Taxonomy" ], "authors": [ { "givenName": "Majed", "surname": "Al Zayer", "fullName": "Majed Al Zayer", "affiliation": "University of Nevada, Reno, NV, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Paul", "surname": "MacNeilage", "fullName": "Paul MacNeilage", "affiliation": "University of Nevada, Reno, NV, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Eelke", "surname": "Folmer", "fullName": "Eelke Folmer", "affiliation": "University of Nevada, Reno, NV, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2315-2334", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892348", "title": "Steering locomotion by vestibular perturbation in room-scale VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2012/1204/0/06184180", "title": "From virtual to 
actual mobility: Assessing the benefits of active locomotion through an immersive virtual environment using a motorized wheelchair", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184180/12OmNxdDFLw", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446130", "title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/07/07946183", "title": "Walking with Virtual People: Evaluation of Locomotion Interfaces in Dynamic Environments", "doi": null, "abstractUrl": "/journal/tg/2018/07/07946183/13rRUEgs2C2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2018/7123/0/08493432", "title": "LUTE: A Locomotion Usability Test Environmentfor Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2018/08493432/14tNJmUlJD4", "parentPublication": { "id": "proceedings/vs-games/2018/7123/0", "title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714054", "title": "Remote research on locomotion interfaces for virtual reality: Replication of a lab-based study on teleporting interfaces", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714054/1B0XZAXWaIg", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09744001", "title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a696", "title": "Seamless-walk: Novel Natural Virtual Reality Locomotion Method with a High-Resolution Tactile Sensor", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a696/1CJeXaYYtd6", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09761724", "title": "Effects of Transfer Functions and Body Parts on Body-centric Locomotion in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/5555/01/09761724/1CKMkLCKOSk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a452", "title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": 
"08543848", "articleId": "17D45VsBU70", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwgQpDb", "doi": "10.1109/2945.675647", "abstract": "Abstract—Camera calibration and the acquisition of Euclidean 3D measurements have so far been considered necessary requirements for overlaying three-dimensional graphical objects with live video. In this article, we describe a new approach to video-based augmented reality that avoids both requirements: It does not use any metric information about the calibration parameters of the camera or the 3D locations and dimensions of the environment's objects. The only requirement is the ability to track across frames at least four fiducial points that are specified by the user during system initialization and whose world coordinates are unknown.Our approach is based on the following observation: Given a set of four or more noncoplanar 3D points, the projection of all points in the set can be computed as a linear combination of the projections of just four of the points. We exploit this observation by 1) tracking regions and color fiducial points at frame rate, and 2) representing virtual objects in a non-Euclidean, affine frame of reference that allows their projection to be computed as a linear combination of the projection of the fiducial points. 
Experimental results on two augmented reality systems, one monitor-based and one head-mounted, demonstrate that the approach is readily implementable, imposes minimal computational and hardware requirements, and generates real-time and accurate video overlays even when the camera parameters vary dynamically.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—Camera calibration and the acquisition of Euclidean 3D measurements have so far been considered necessary requirements for overlaying three-dimensional graphical objects with live video. In this article, we describe a new approach to video-based augmented reality that avoids both requirements: It does not use any metric information about the calibration parameters of the camera or the 3D locations and dimensions of the environment's objects. The only requirement is the ability to track across frames at least four fiducial points that are specified by the user during system initialization and whose world coordinates are unknown.Our approach is based on the following observation: Given a set of four or more noncoplanar 3D points, the projection of all points in the set can be computed as a linear combination of the projections of just four of the points. We exploit this observation by 1) tracking regions and color fiducial points at frame rate, and 2) representing virtual objects in a non-Euclidean, affine frame of reference that allows their projection to be computed as a linear combination of the projection of the fiducial points. 
Experimental results on two augmented reality systems, one monitor-based and one head-mounted, demonstrate that the approach is readily implementable, imposes minimal computational and hardware requirements, and generates real-time and accurate video overlays even when the camera parameters vary dynamically.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—Camera calibration and the acquisition of Euclidean 3D measurements have so far been considered necessary requirements for overlaying three-dimensional graphical objects with live video. In this article, we describe a new approach to video-based augmented reality that avoids both requirements: It does not use any metric information about the calibration parameters of the camera or the 3D locations and dimensions of the environment's objects. The only requirement is the ability to track across frames at least four fiducial points that are specified by the user during system initialization and whose world coordinates are unknown.Our approach is based on the following observation: Given a set of four or more noncoplanar 3D points, the projection of all points in the set can be computed as a linear combination of the projections of just four of the points. We exploit this observation by 1) tracking regions and color fiducial points at frame rate, and 2) representing virtual objects in a non-Euclidean, affine frame of reference that allows their projection to be computed as a linear combination of the projection of the fiducial points. 
Experimental results on two augmented reality systems, one monitor-based and one head-mounted, demonstrate that the approach is readily implementable, imposes minimal computational and hardware requirements, and generates real-time and accurate video overlays even when the camera parameters vary dynamically.", "title": "Calibration-Free Augmented Reality", "normalizedTitle": "Calibration-Free Augmented Reality", "fno": "v0001", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Real Time Computer Vision", "Calibration", "Registration", "Affine Representations", "Feature Tracking", "3 D Interaction Techniques" ], "authors": [ { "givenName": "Kiriakos N.", "surname": "Kutulakos", "fullName": "Kiriakos N. Kutulakos", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "James R.", "surname": "Vallino", "fullName": "James R. Vallino", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "1-20", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": null, "next": { "fno": "v0021", "articleId": "13rRUNvgyW9", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvgyW9", "doi": "10.1109/2945.675649", "abstract": "Abstract—Collision detection is of paramount importance for many applications in computer graphics and visualization. Typically, the input to a collision detection algorithm is a large number of geometric objects comprising an environment, together with a set of objects moving within the environment. In addition to determining accurately the contacts that occur between pairs of objects, one needs also to do so at real-time rates. Applications such as haptic force-feedback can require over 1,000 collision queries per second.In this paper, we develop and analyze a method, based on bounding-volume hierarchies, for efficient collision detection for objects moving within highly complex environments. Our choice of bounding volume is to use a \"discrete orientation polytope\" (\"k-dop\"), a convex polytope whose facets are determined by halfspaces whose outward normals come from a small fixed set of k orientations. We compare a variety of methods for constructing hierarchies (\"BV-trees\") of bounding k-dops. Further, we propose algorithms for maintaining an effective BV-tree of k-dops for moving objects, as they rotate, and for performing fast collision detection using BV-trees of the moving objects and of the environment.Our algorithms have been implemented and tested. We provide experimental evidence showing that our approach yields substantially faster collision detection than previous methods.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—Collision detection is of paramount importance for many applications in computer graphics and visualization. 
Typically, the input to a collision detection algorithm is a large number of geometric objects comprising an environment, together with a set of objects moving within the environment. In addition to determining accurately the contacts that occur between pairs of objects, one needs also to do so at real-time rates. Applications such as haptic force-feedback can require over 1,000 collision queries per second.In this paper, we develop and analyze a method, based on bounding-volume hierarchies, for efficient collision detection for objects moving within highly complex environments. Our choice of bounding volume is to use a \"discrete orientation polytope\" (\"k-dop\"), a convex polytope whose facets are determined by halfspaces whose outward normals come from a small fixed set of k orientations. We compare a variety of methods for constructing hierarchies (\"BV-trees\") of bounding k-dops. Further, we propose algorithms for maintaining an effective BV-tree of k-dops for moving objects, as they rotate, and for performing fast collision detection using BV-trees of the moving objects and of the environment.Our algorithms have been implemented and tested. We provide experimental evidence showing that our approach yields substantially faster collision detection than previous methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—Collision detection is of paramount importance for many applications in computer graphics and visualization. Typically, the input to a collision detection algorithm is a large number of geometric objects comprising an environment, together with a set of objects moving within the environment. In addition to determining accurately the contacts that occur between pairs of objects, one needs also to do so at real-time rates. 
Applications such as haptic force-feedback can require over 1,000 collision queries per second.In this paper, we develop and analyze a method, based on bounding-volume hierarchies, for efficient collision detection for objects moving within highly complex environments. Our choice of bounding volume is to use a \"discrete orientation polytope\" (\"k-dop\"), a convex polytope whose facets are determined by halfspaces whose outward normals come from a small fixed set of k orientations. We compare a variety of methods for constructing hierarchies (\"BV-trees\") of bounding k-dops. Further, we propose algorithms for maintaining an effective BV-tree of k-dops for moving objects, as they rotate, and for performing fast collision detection using BV-trees of the moving objects and of the environment.Our algorithms have been implemented and tested. We provide experimental evidence showing that our approach yields substantially faster collision detection than previous methods.", "title": "Efficient Collision Detection Using Bounding Volume Hierarchies of k-DOPs", "normalizedTitle": "Efficient Collision Detection Using Bounding Volume Hierarchies of k-DOPs", "fno": "v0021", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Collision Detection", "Intersection Searching", "Bounding Volume Hierarchies", "Discrete Orientation Polytopes", "Bounding Boxes", "Virtual Reality", "Virtual Environments" ], "authors": [ { "givenName": "James T.", "surname": "Klosowski", "fullName": "James T. Klosowski", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Martin", "surname": "Held", "fullName": "Martin Held", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Joseph S.B.", "surname": "Mitchell", "fullName": "Joseph S.B. 
Mitchell", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Henry", "surname": "Sowizral", "fullName": "Henry Sowizral", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Karel", "surname": "Zikan", "fullName": "Karel Zikan", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "21-36", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0001", "articleId": "13rRUwgQpDb", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0037", "articleId": "13rRUwwaKsU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwwaKsU", "doi": "10.1109/2945.675650", "abstract": "Abstract—This paper describes a volume rendering system for unstructured data, especially finite element data, that creates images with very high accuracy. The system will currently handle meshes whose cells are either linear or quadratic tetrahedra. Compromises or approximations are not introduced for the sake of efficiency. Whenever possible, exact mathematical solutions for the radiance integrals involved and for interpolation are used. The system will also handle meshes with mixed cell types: tetrahedra, bricks, prisms, wedges, and pyramids, but not with high accuracy. Accurate semitransparent shaded isosurfaces may be embedded in the volume rendering. For very small cells, subpixel accumulation by splatting is used to avoid sampling error. A revision to an existing accurate visibility ordering algorithm is described, which includes a correction and a method for dramatically increasing its efficiency. Finally, hardware assisted projection and compositing are extended from tetrahedra to arbitrary convex polyhedra.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—This paper describes a volume rendering system for unstructured data, especially finite element data, that creates images with very high accuracy. The system will currently handle meshes whose cells are either linear or quadratic tetrahedra. Compromises or approximations are not introduced for the sake of efficiency. Whenever possible, exact mathematical solutions for the radiance integrals involved and for interpolation are used. 
The system will also handle meshes with mixed cell types: tetrahedra, bricks, prisms, wedges, and pyramids, but not with high accuracy. Accurate semitransparent shaded isosurfaces may be embedded in the volume rendering. For very small cells, subpixel accumulation by splatting is used to avoid sampling error. A revision to an existing accurate visibility ordering algorithm is described, which includes a correction and a method for dramatically increasing its efficiency. Finally, hardware assisted projection and compositing are extended from tetrahedra to arbitrary convex polyhedra.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—This paper describes a volume rendering system for unstructured data, especially finite element data, that creates images with very high accuracy. The system will currently handle meshes whose cells are either linear or quadratic tetrahedra. Compromises or approximations are not introduced for the sake of efficiency. Whenever possible, exact mathematical solutions for the radiance integrals involved and for interpolation are used. The system will also handle meshes with mixed cell types: tetrahedra, bricks, prisms, wedges, and pyramids, but not with high accuracy. Accurate semitransparent shaded isosurfaces may be embedded in the volume rendering. For very small cells, subpixel accumulation by splatting is used to avoid sampling error. A revision to an existing accurate visibility ordering algorithm is described, which includes a correction and a method for dramatically increasing its efficiency. 
Finally, hardware assisted projection and compositing are extended from tetrahedra to arbitrary convex polyhedra.", "title": "A High Accuracy Volume Renderer for Unstructured Data", "normalizedTitle": "A High Accuracy Volume Renderer for Unstructured Data", "fno": "v0037", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Volume Rendering", "Unstructured Meshes", "High Accuracy", "Finite Element Method", "Isosurfaces", "Splatting", "Cell Projection", "Visibility Ordering", "Depth Sorting" ], "authors": [ { "givenName": "Peter L.", "surname": "Williams", "fullName": "Peter L. Williams", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Nelson L.", "surname": "Max", "fullName": "Nelson L. Max", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Clifford M.", "surname": "Stein", "fullName": "Clifford M. Stein", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "37-54", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0021", "articleId": "13rRUNvgyW9", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0055", "articleId": "13rRUxly95q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxly95q", "doi": "10.1109/2945.675652", "abstract": "Abstract—Complex repetitive scenes containing forests, foliage, grass, hair, or fur, are challenging for common modeling and rendering tools. The amount of data, the tediousness of modeling and animation tasks, and the cost of realistic rendering have caused such kind of scene to see only limited use even in high-end productions. We describe here how the use of volumetric textures is well suited to such scenes. These primitives can greatly simplify modeling and animation tasks. More importantly, they can be very efficiently rendered using ray tracing with few aliasing artifacts. The main idea, initially introduced by Kajiya and Kay [9], is to represent a pattern of 3D geometry in a reference volume, that is tiled over an underlying surface much like a regular 2D texture. In our contribution, the mapping is independent of the mesh subdivision, the pattern can contain any kind of shape, and it is prefiltered at different scales as for MIP-mapping. Although the model encoding is volumetric, the rendering method differs greatly from traditional volume rendering: A volumetric texture only exists in the neighborhood of a surface, and the repeated instances (called texels) of the reference volume are spatially deformed. Furthermore, each voxel of the reference volume contains a key feature which controls the reflectance function that represents aggregate intravoxel geometry. This allows for ray-tracing of highly complex scenes with very few aliasing artifacts, using a single ray per pixel (for the part of the scene using the volumetric texture representation). 
The major technical considerations of our method lie in the ray-path determination and in the specification of the reflectance function.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—Complex repetitive scenes containing forests, foliage, grass, hair, or fur, are challenging for common modeling and rendering tools. The amount of data, the tediousness of modeling and animation tasks, and the cost of realistic rendering have caused such kind of scene to see only limited use even in high-end productions. We describe here how the use of volumetric textures is well suited to such scenes. These primitives can greatly simplify modeling and animation tasks. More importantly, they can be very efficiently rendered using ray tracing with few aliasing artifacts. The main idea, initially introduced by Kajiya and Kay [9], is to represent a pattern of 3D geometry in a reference volume, that is tiled over an underlying surface much like a regular 2D texture. In our contribution, the mapping is independent of the mesh subdivision, the pattern can contain any kind of shape, and it is prefiltered at different scales as for MIP-mapping. Although the model encoding is volumetric, the rendering method differs greatly from traditional volume rendering: A volumetric texture only exists in the neighborhood of a surface, and the repeated instances (called texels) of the reference volume are spatially deformed. Furthermore, each voxel of the reference volume contains a key feature which controls the reflectance function that represents aggregate intravoxel geometry. This allows for ray-tracing of highly complex scenes with very few aliasing artifacts, using a single ray per pixel (for the part of the scene using the volumetric texture representation). 
The major technical considerations of our method lie in the ray-path determination and in the specification of the reflectance function.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—Complex repetitive scenes containing forests, foliage, grass, hair, or fur, are challenging for common modeling and rendering tools. The amount of data, the tediousness of modeling and animation tasks, and the cost of realistic rendering have caused such kind of scene to see only limited use even in high-end productions. We describe here how the use of volumetric textures is well suited to such scenes. These primitives can greatly simplify modeling and animation tasks. More importantly, they can be very efficiently rendered using ray tracing with few aliasing artifacts. The main idea, initially introduced by Kajiya and Kay [9], is to represent a pattern of 3D geometry in a reference volume, that is tiled over an underlying surface much like a regular 2D texture. In our contribution, the mapping is independent of the mesh subdivision, the pattern can contain any kind of shape, and it is prefiltered at different scales as for MIP-mapping. Although the model encoding is volumetric, the rendering method differs greatly from traditional volume rendering: A volumetric texture only exists in the neighborhood of a surface, and the repeated instances (called texels) of the reference volume are spatially deformed. Furthermore, each voxel of the reference volume contains a key feature which controls the reflectance function that represents aggregate intravoxel geometry. This allows for ray-tracing of highly complex scenes with very few aliasing artifacts, using a single ray per pixel (for the part of the scene using the volumetric texture representation). 
The major technical considerations of our method lie in the ray-path determination and in the specification of the reflectance function.", "title": "Modeling, Animating, and Rendering Complex Scenes Using Volumetric Textures", "normalizedTitle": "Modeling, Animating, and Rendering Complex Scenes Using Volumetric Textures", "fno": "v0055", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Volumetric Textures", "Complex Geometry", "Levels Of Detail" ], "authors": [ { "givenName": "Fabrice", "surname": "Neyret", "fullName": "Fabrice Neyret", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "55-70", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0037", "articleId": "13rRUwwaKsU", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0071", "articleId": "13rRUB7a1fF", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUB7a1fF", "doi": "10.1109/2945.675655", "abstract": "Abstract—A technique is presented for line art rendering of scenes composed of freeform surfaces. The line art that is created for parametric surfaces is practically intrinsic and is globally invariant to changes in the surface parameterization. This method is equally applicable for line art rendering of implicit forms, creating a unified line art rendering method for both parametric and implicit forms. This added flexibility exposes a new horizon of special, parameterization independent, line art effects. Moreover, the production of the line art illustrations can be combined with traditional rendering techniques such as transparency and texture mapping. Examples that demonstrate the capabilities of the proposed approach are presented for both the parametric and implicit forms.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—A technique is presented for line art rendering of scenes composed of freeform surfaces. The line art that is created for parametric surfaces is practically intrinsic and is globally invariant to changes in the surface parameterization. This method is equally applicable for line art rendering of implicit forms, creating a unified line art rendering method for both parametric and implicit forms. This added flexibility exposes a new horizon of special, parameterization independent, line art effects. Moreover, the production of the line art illustrations can be combined with traditional rendering techniques such as transparency and texture mapping. 
Examples that demonstrate the capabilities of the proposed approach are presented for both the parametric and implicit forms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—A technique is presented for line art rendering of scenes composed of freeform surfaces. The line art that is created for parametric surfaces is practically intrinsic and is globally invariant to changes in the surface parameterization. This method is equally applicable for line art rendering of implicit forms, creating a unified line art rendering method for both parametric and implicit forms. This added flexibility exposes a new horizon of special, parameterization independent, line art effects. Moreover, the production of the line art illustrations can be combined with traditional rendering techniques such as transparency and texture mapping. Examples that demonstrate the capabilities of the proposed approach are presented for both the parametric and implicit forms.", "title": "Line Art Illustrations of Parametric and Implicit Forms", "normalizedTitle": "Line Art Illustrations of Parametric and Implicit Forms", "fno": "v0071", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Sketches", "Illustrations", "Line Drawings", "Freeform Surfaces", "NUR Bs", "Implicit Forms", "Surface Coverage", "Printing" ], "authors": [ { "givenName": "Gershon", "surname": "Elber", "fullName": "Gershon Elber", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "71-81", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0055", "articleId": "13rRUxly95q", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0082", "articleId": "13rRUx0geuY", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfa", "title": "January-March", "year": "1998", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0geuY", "doi": "10.1109/2945.675656", "abstract": "Abstract—A terrain is most often represented with a digital elevation map consisting of a set of sample points from the terrain surface. This paper presents a fast and practical algorithm to compute the horizon, or skyline, at all sample points of a terrain. The horizons are useful in a number of applications, including the rendering of self-shadowing displacement maps, visibility culling for faster flight simulation, and rendering of cartographic data. Experimental and theoretical results are presented which show that the algorithm is more accurate that previous algorithms and is faster than previous algorithms in terrains of more than 100,000 sample points.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—A terrain is most often represented with a digital elevation map consisting of a set of sample points from the terrain surface. This paper presents a fast and practical algorithm to compute the horizon, or skyline, at all sample points of a terrain. The horizons are useful in a number of applications, including the rendering of self-shadowing displacement maps, visibility culling for faster flight simulation, and rendering of cartographic data. Experimental and theoretical results are presented which show that the algorithm is more accurate that previous algorithms and is faster than previous algorithms in terrains of more than 100,000 sample points.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—A terrain is most often represented with a digital elevation map consisting of a set of sample points from the terrain surface. 
This paper presents a fast and practical algorithm to compute the horizon, or skyline, at all sample points of a terrain. The horizons are useful in a number of applications, including the rendering of self-shadowing displacement maps, visibility culling for faster flight simulation, and rendering of cartographic data. Experimental and theoretical results are presented which show that the algorithm is more accurate that previous algorithms and is faster than previous algorithms in terrains of more than 100,000 sample points.", "title": "Fast Horizon Computation at All Points of a Terrain With Visibility and Shading Applications", "normalizedTitle": "Fast Horizon Computation at All Points of a Terrain With Visibility and Shading Applications", "fno": "v0082", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Terrain", "Digital Elevation Map", "Horizon", "Skyline", "Visibility", "Shadows", "Rendering", "GIS" ], "authors": [ { "givenName": "A. James", "surname": "Stewart", "fullName": "A. James Stewart", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1998-01-01 00:00:00", "pubType": "trans", "pages": "82-93", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0071", "articleId": "13rRUB7a1fF", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZpr3", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwgyOjk", "doi": "10.1109/TVCG.2013.63", "abstract": "Poisson disk sampling has excellent spatial and spectral properties, and plays an important role in a variety of visual computing. Although many promising algorithms have been proposed for multidimensional sampling in euclidean space, very few studies have been reported with regard to the problem of generating Poisson disks on surfaces due to the complicated nature of the surface. This paper presents an intrinsic algorithm for parallel Poisson disk sampling on arbitrary surfaces. In sharp contrast to the conventional parallel approaches, our method neither partitions the given surface into small patches nor uses any spatial data structure to maintain the voids in the sampling domain. Instead, our approach assigns each sample candidate a random and unique priority that is unbiased with regard to the distribution. Hence, multiple threads can process the candidates simultaneously and resolve conflicts by checking the given priority values. Our algorithm guarantees that the generated Poisson disks are uniformly and randomly distributed without bias. It is worth noting that our method is intrinsic and independent of the embedding space. This intrinsic feature allows us to generate Poisson disk patterns on arbitrary surfaces in IRn. To our knowledge, this is the first intrinsic, parallel, and accurate algorithm for surface Poisson disk sampling. 
Furthermore, by manipulating the spatially varying density function, we can obtain adaptive sampling easily.", "abstracts": [ { "abstractType": "Regular", "content": "Poisson disk sampling has excellent spatial and spectral properties, and plays an important role in a variety of visual computing. Although many promising algorithms have been proposed for multidimensional sampling in euclidean space, very few studies have been reported with regard to the problem of generating Poisson disks on surfaces due to the complicated nature of the surface. This paper presents an intrinsic algorithm for parallel Poisson disk sampling on arbitrary surfaces. In sharp contrast to the conventional parallel approaches, our method neither partitions the given surface into small patches nor uses any spatial data structure to maintain the voids in the sampling domain. Instead, our approach assigns each sample candidate a random and unique priority that is unbiased with regard to the distribution. Hence, multiple threads can process the candidates simultaneously and resolve conflicts by checking the given priority values. Our algorithm guarantees that the generated Poisson disks are uniformly and randomly distributed without bias. It is worth noting that our method is intrinsic and independent of the embedding space. This intrinsic feature allows us to generate Poisson disk patterns on arbitrary surfaces in IRn. To our knowledge, this is the first intrinsic, parallel, and accurate algorithm for surface Poisson disk sampling. Furthermore, by manipulating the spatially varying density function, we can obtain adaptive sampling easily.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Poisson disk sampling has excellent spatial and spectral properties, and plays an important role in a variety of visual computing. 
Although many promising algorithms have been proposed for multidimensional sampling in euclidean space, very few studies have been reported with regard to the problem of generating Poisson disks on surfaces due to the complicated nature of the surface. This paper presents an intrinsic algorithm for parallel Poisson disk sampling on arbitrary surfaces. In sharp contrast to the conventional parallel approaches, our method neither partitions the given surface into small patches nor uses any spatial data structure to maintain the voids in the sampling domain. Instead, our approach assigns each sample candidate a random and unique priority that is unbiased with regard to the distribution. Hence, multiple threads can process the candidates simultaneously and resolve conflicts by checking the given priority values. Our algorithm guarantees that the generated Poisson disks are uniformly and randomly distributed without bias. It is worth noting that our method is intrinsic and independent of the embedding space. This intrinsic feature allows us to generate Poisson disk patterns on arbitrary surfaces in IRn. To our knowledge, this is the first intrinsic, parallel, and accurate algorithm for surface Poisson disk sampling. Furthermore, by manipulating the spatially varying density function, we can obtain adaptive sampling easily.", "title": "An Intrinsic Algorithm for Parallel Poisson Disk Sampling on Arbitrary Surfaces", "normalizedTitle": "An Intrinsic Algorithm for Parallel Poisson Disk Sampling on Arbitrary Surfaces", "fno": "ttg2013091425", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Instruction Sets", "Partitioning Algorithms", "Data Structures", "Approximation Algorithms", "Algorithm Design And Analysis", "Spatial Databases", "Geodesic Distance", "Parallel Poisson Disk Sampling", "Intrinsic Algorithm", "Unbiased Sampling", "GPU" ], "authors": [ { "givenName": null, "surname": "Xiang Ying", "fullName": "Xiang Ying", "affiliation": "Sch. of Comput. 
Eng., Nanyang Technol. Univ., Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Shi-Qing Xin", "fullName": "Shi-Qing Xin", "affiliation": "Sch. of Comput. Eng., Nanyang Technol. Univ., Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Qian Sun", "fullName": "Qian Sun", "affiliation": "Sch. of Comput. Eng., Nanyang Technol. Univ., Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Ying He", "fullName": "Ying He", "affiliation": "Sch. of Comput. Eng., Nanyang Technol. Univ., Singapore, Singapore", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "1425-1437", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2013/4989/0/4989a233", "title": "Intrinsic Characterization of Dynamic Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2013/4989a233/12OmNx7ov3C", "parentPublication": { "id": "proceedings/cvpr/2013/4989/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2015/9403/0/9403a090", "title": "Writing Chinese Calligraphy on Arbitrary Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cw/2015/9403a090/12OmNybfr6u", "parentPublication": { "id": "proceedings/cw/2015/9403/0", "title": "2015 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isip/2010/4261/0/4261a462", "title": "The Split Bregman Method for Image Diffusion on Implicit Surfaces", "doi": null, "abstractUrl": 
"/proceedings-article/isip/2010/4261a462/12OmNyuPLp1", "parentPublication": { "id": "proceedings/isip/2010/4261/0", "title": "2010 Third International Symposium on Information Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2007/1629/0/04342600", "title": "Poisson Disk Point Sets by Hierarchical Dart Throwing", "doi": null, "abstractUrl": "/proceedings-article/rt/2007/04342600/12OmNzFMFqN", "parentPublication": { "id": "proceedings/rt/2007/1629/0", "title": "IEEE/ EG Symposium on Interactive Ray Tracing 2007", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733a690", "title": "Poisson Disk Sampling on the Grassmannnian: Applications in Subspace Optimization", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733a690/12OmNzTYC7k", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/05/ttg2008050982", "title": "Dual Poisson-Disk Tiling: An Efficient Method for Distributing Features on Arbitrary Surfaces", "doi": null, "abstractUrl": "/journal/tg/2008/05/ttg2008050982/13rRUwI5UfY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/06/07927461", "title": "Good Random Multi-Triangulation of Surfaces", "doi": null, "abstractUrl": "/journal/tg/2018/06/07927461/13rRUxly9e2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040805", "title": "Globally Optimal Surface Mapping for Surfaces with 
Arbitrary Topology", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040805/13rRUygT7su", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2018/5500/0/550000b153", "title": "Mixed Tensor Product of q-Bezier-Poisson Surfaces", "doi": null, "abstractUrl": "/proceedings-article/icisce/2018/550000b153/17D45WgziRw", "parentPublication": { "id": "proceedings/icisce/2018/5500/0", "title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/culture-and-computing/2017/1135/0/08227334", "title": "Improving Transparent Visualization of Large-Scale Laser-Scanned Point Clouds by Using Poisson Disk Sampling", "doi": null, "abstractUrl": "/proceedings-article/culture-and-computing/2017/08227334/17D45XERmmb", "parentPublication": { "id": "proceedings/culture-and-computing/2017/1135/0", "title": "2017 International Conference on Culture and Computing (Culture and Computing)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "ttg2013091438", "articleId": "13rRUxASuGk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }

Dataset Card for "TVCG_Papers"

  • Dataset Description: Repository, Paper, Leaderboard, and Point of Contact are not yet specified.

  • Dataset Summary: This dataset contains 5178 papers from IEEE TVCG. Each paper includes multiple raw attributes, covering both metadata and the abstract.

  • Dataset Structure: A JSONL file; each paper instance is a JSON object.

  • Data Fields:

# HuggingFace `datasets` feature schema for the TVCG_Papers dataset.
# Each JSONL record is one paper under a top-level 'data' key: issue
# metadata, article metadata (abstract, authors, identifiers), recommended
# articles, and prev/next navigation links. A list containing a single
# dict denotes a sequence of structs; scalar fields are strings unless
# declared bool.
FEATURE = Features({
    'data': Features({
        # Journal issue the paper appeared in.
        'issue': Features({
            'id': Value(dtype='string'),
            'title': Value(dtype='string'),
            'year': Value(dtype='string'),
            'issueNum': Value(dtype='string'),
            'idPrefix': Value(dtype='string'),
            'pubType': Value(dtype='string'),
            'volume': Value(dtype='string'),
            'label': Value(dtype='string'),
            'downloadables': Features({
                'hasCover': Value(dtype='bool'),
                '__typename': Value(dtype='string')
            }),
            '__typename': Value(dtype='string')
        }),
        # The paper itself.
        'article': Features({
            'id': Value(dtype='string'),
            'doi': Value(dtype='string'),
            'abstract': Value(dtype='string'),
            'abstracts': [
                {
                    'abstractType': Value(dtype='string'),
                    'content': Value(dtype='string'),
                    '__typename': Value(dtype='string')
                }
            ],
            'normalizedAbstract': Value(dtype='string'),
            'title': Value(dtype='string'),
            'normalizedTitle': Value(dtype='string'),
            'fno': Value(dtype='string'),
            'hasPdf': Value(dtype='bool'),
            'idPrefix': Value(dtype='string'),
            'keywords': [Value(dtype='string')],
            'authors': [
                {
                    'givenName': Value(dtype='string'),
                    'surname': Value(dtype='string'),
                    'fullName': Value(dtype='string'),
                    'affiliation': Value(dtype='string'),
                    '__typename': Value(dtype='string')
                }
            ],
            # Code-availability info; null in the sample records above.
            'replicability': Features({
                'isEnabled': Value(dtype='bool'),
                'codeDownloadUrl': Value(dtype='string'),
                'codeRepositoryUrl': Value(dtype='string'),
                '__typename': Value(dtype='string')
            }),
            'showBuyMe': Value(dtype='bool'),
            'showRecommendedArticles': Value(dtype='bool'),
            'isOpenAccess': Value(dtype='bool'),
            'issueNum': Value(dtype='string'),
            'pubDate': Value(dtype='string'),
            'pubType': Value(dtype='string'),
            'pages': Value(dtype='string'),
            'year': Value(dtype='string'),
            'issn': Value(dtype='string'),
            'isbn': Value(dtype='string'),
            'notes': Value(dtype='string'),
            'notesType': Value(dtype='string'),
            '__typename': Value(dtype='string')
        }),
        # Related papers suggested alongside the article.
        'recommendedArticles': [
            {
                'id': Value(dtype='string'),
                'title': Value(dtype='string'),
                'doi': Value(dtype='string'),
                'abstractUrl': Value(dtype='string'),
                'parentPublication': {
                    'id': Value(dtype='string'),
                    'title': Value(dtype='string'),
                    '__typename': Value(dtype='string')
                },
                '__typename': Value(dtype='string')
            }
        ],
        # Previous/next article links within the issue.
        'adjacentArticles': Features({
            'previous': {
                'fno': Value(dtype='string'),
                'articleId': Value(dtype='string'),
                '__typename': Value(dtype='string')
            },
            'next': {
                'fno': Value(dtype='string'),
                'articleId': Value(dtype='string'),
                '__typename': Value(dtype='string')
            },
            '__typename': Value(dtype='string')
        }),
        # Supplementary-material files attached to the article.
        'webExtras': [
            {
                'id': Value(dtype='string'),
                'name': Value(dtype='string'),
                'location': Value(dtype='string'),
                'extension': Value(dtype='string'),
                'size': Value(dtype='string'),
                '__typename': Value(dtype='string')
            }
        ],
        'articleVideos': [Value(dtype='string')]
    })
})
Downloads last month
0
Edit dataset card