data
dict |
---|
{
"issue": {
"id": "12OmNqESuig",
"title": "May/June",
"year": "2010",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "16",
"label": "May/June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwI5TQU",
"doi": "10.1109/TVCG.2009.74",
"abstract": "This paper presents a new method for voxelization of solid objects containing sharp details. Voxelization is a sampling process that transforms a continuously defined object into a discrete one represented as a voxel field. The voxel field can be used for rendering or other purposes, which often involve a reconstruction of a continuous approximation of the original object. Objects to be voxelized need to fulfill certain representability conditions; otherwise, disturbing artifacts appear during reconstruction. The method proposed here extends the traditional distance-based voxelization by an a-priori detection of sharp object details and their subsequent modification in such a way that the resulting object to be voxelized fulfills the representability conditions. The resulting discrete objects are represented by means of truncated (i.e., narrow-band) distance fields, which provide reduction of memory requirements and further processing by level set techniques. This approach is exemplified by two classes of solid objects that normally contain such sharp details: implicit solids and solids resulting from CSG operations. In both cases, the sharp details are rounded to a specific curvature dictated by the sampling distance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a new method for voxelization of solid objects containing sharp details. Voxelization is a sampling process that transforms a continuously defined object into a discrete one represented as a voxel field. The voxel field can be used for rendering or other purposes, which often involve a reconstruction of a continuous approximation of the original object. Objects to be voxelized need to fulfill certain representability conditions; otherwise, disturbing artifacts appear during reconstruction. The method proposed here extends the traditional distance-based voxelization by an a-priori detection of sharp object details and their subsequent modification in such a way that the resulting object to be voxelized fulfills the representability conditions. The resulting discrete objects are represented by means of truncated (i.e., narrow-band) distance fields, which provide reduction of memory requirements and further processing by level set techniques. This approach is exemplified by two classes of solid objects that normally contain such sharp details: implicit solids and solids resulting from CSG operations. In both cases, the sharp details are rounded to a specific curvature dictated by the sampling distance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a new method for voxelization of solid objects containing sharp details. Voxelization is a sampling process that transforms a continuously defined object into a discrete one represented as a voxel field. The voxel field can be used for rendering or other purposes, which often involve a reconstruction of a continuous approximation of the original object. Objects to be voxelized need to fulfill certain representability conditions; otherwise, disturbing artifacts appear during reconstruction. The method proposed here extends the traditional distance-based voxelization by an a-priori detection of sharp object details and their subsequent modification in such a way that the resulting object to be voxelized fulfills the representability conditions. The resulting discrete objects are represented by means of truncated (i.e., narrow-band) distance fields, which provide reduction of memory requirements and further processing by level set techniques. This approach is exemplified by two classes of solid objects that normally contain such sharp details: implicit solids and solids resulting from CSG operations. In both cases, the sharp details are rounded to a specific curvature dictated by the sampling distance.",
"title": "Enhanced Voxelization and Representation of Objects with Sharp Details in Truncated Distance Fields",
"normalizedTitle": "Enhanced Voxelization and Representation of Objects with Sharp Details in Truncated Distance Fields",
"fno": "ttg2010030484",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Voxelization",
"Truncated Distance Fields",
"Run Length Compression",
"CSG Operations",
"Sharp Details",
"Implicit Solids",
"Artifacts",
"Representability"
],
"authors": [
{
"givenName": "Pavol",
"surname": "Novotný",
"fullName": "Pavol Novotný",
"affiliation": "IBL Software Engineering, Mierova and Comenius University, Bratislava",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Leonid I.",
"surname": "Dimitrov",
"fullName": "Leonid I. Dimitrov",
"affiliation": "Austrian Academy of Sciences, Vienna",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Miloš",
"surname": "Šrámek",
"fullName": "Miloš Šrámek",
"affiliation": "Austrian Academy of Sciences, Vienna",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2010-05-01 00:00:00",
"pubType": "trans",
"pages": "484-498",
"year": "2010",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vg/2005/26/0/01500531",
"title": "Representation of objects with sharp details in truncated distance fields",
"doi": null,
"abstractUrl": "/proceedings-article/vg/2005/01500531/12OmNC2xhyX",
"parentPublication": {
"id": "proceedings/vg/2005/26/0",
"title": "Volume Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2004/2171/0/21710370",
"title": "CSG Operations with Voxelized Solids",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2004/21710370/12OmNy2Jtay",
"parentPublication": {
"id": "proceedings/cgi/2004/2171/0",
"title": "Proceedings. Computer Graphics International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/1/81831389",
"title": "Interpolation of the DC component of coded images using a rational filter",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831389/12OmNzdoMHY",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2010030468",
"articleId": "13rRUxlgxOg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2010030499",
"articleId": "13rRUEgarBn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNqESuig",
"title": "May/June",
"year": "2010",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "16",
"label": "May/June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUEgarBn",
"doi": "10.1109/TVCG.2009.87",
"abstract": "In this paper, we demonstrate that quasi-interpolation of orders two and four can be efficiently implemented on the Body-Centered Cubic (BCC) lattice by using tensor-product B-splines combined with appropriate discrete prefilters. Unlike the nonseparable box-spline reconstruction previously proposed for the BCC lattice, the prefiltered B-spline reconstruction can utilize the fast trilinear texture-fetching capability of the recent graphics cards. Therefore, it can be applied for rendering BCC-sampled volumetric data interactively. Furthermore, we show that a separable B-spline filter can suppress the postaliasing effect much more isotropically than a nonseparable box-spline filter of the same approximation power. Although prefilters that make the B-splines interpolating on the BCC lattice do not exist, we demonstrate that quasi-interpolating prefiltered linear and cubic B-spline reconstructions can still provide similar or higher image quality than the interpolating linear box-spline and prefiltered quintic box-spline reconstructions, respectively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we demonstrate that quasi-interpolation of orders two and four can be efficiently implemented on the Body-Centered Cubic (BCC) lattice by using tensor-product B-splines combined with appropriate discrete prefilters. Unlike the nonseparable box-spline reconstruction previously proposed for the BCC lattice, the prefiltered B-spline reconstruction can utilize the fast trilinear texture-fetching capability of the recent graphics cards. Therefore, it can be applied for rendering BCC-sampled volumetric data interactively. Furthermore, we show that a separable B-spline filter can suppress the postaliasing effect much more isotropically than a nonseparable box-spline filter of the same approximation power. Although prefilters that make the B-splines interpolating on the BCC lattice do not exist, we demonstrate that quasi-interpolating prefiltered linear and cubic B-spline reconstructions can still provide similar or higher image quality than the interpolating linear box-spline and prefiltered quintic box-spline reconstructions, respectively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we demonstrate that quasi-interpolation of orders two and four can be efficiently implemented on the Body-Centered Cubic (BCC) lattice by using tensor-product B-splines combined with appropriate discrete prefilters. Unlike the nonseparable box-spline reconstruction previously proposed for the BCC lattice, the prefiltered B-spline reconstruction can utilize the fast trilinear texture-fetching capability of the recent graphics cards. Therefore, it can be applied for rendering BCC-sampled volumetric data interactively. Furthermore, we show that a separable B-spline filter can suppress the postaliasing effect much more isotropically than a nonseparable box-spline filter of the same approximation power. Although prefilters that make the B-splines interpolating on the BCC lattice do not exist, we demonstrate that quasi-interpolating prefiltered linear and cubic B-spline reconstructions can still provide similar or higher image quality than the interpolating linear box-spline and prefiltered quintic box-spline reconstructions, respectively.",
"title": "An Evaluation of Prefiltered B-Spline Reconstruction for Quasi-Interpolation on the Body-Centered Cubic Lattice",
"normalizedTitle": "An Evaluation of Prefiltered B-Spline Reconstruction for Quasi-Interpolation on the Body-Centered Cubic Lattice",
"fno": "ttg2010030499",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Filtering",
"Sampling",
"Volume Visualization"
],
"authors": [
{
"givenName": "Balázs",
"surname": "Csébfalvi",
"fullName": "Balázs Csébfalvi",
"affiliation": "Budapest University of Technology and Economics, Budapest",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2010-05-01 00:00:00",
"pubType": "trans",
"pages": "499-512",
"year": "2010",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2005/2766/0/27660040",
"title": "Prefiltered Gaussian Reconstruction for High-Quality Rendering of Volumetric Data sampled on a Body-Centered Cubic Grid",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660040/12OmNAfy7Ky",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2004/8788/0/87880011",
"title": "Linear and Cubic Box Splines for the Body Centered Cubic Lattice",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2004/87880011/12OmNvAiScO",
"parentPublication": {
"id": "proceedings/ieee-vis/2004/8788/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532810",
"title": "Prefiltered Gaussian reconstruction for high-quality rendering of volumetric data sampled on a body-centered cubic grid",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532810/12OmNwFzO0f",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/1999/0185/0/01850188",
"title": "Monotonic Cubic Spline Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/1999/01850188/12OmNynsbvs",
"parentPublication": {
"id": "proceedings/cgi/1999/0185/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/09/ttg2013091455",
"title": "Cosine-Weighted B-Spline Interpolation: A Fast and High-Quality Reconstruction Scheme for the Body-Centered Cubic Lattice",
"doi": null,
"abstractUrl": "/journal/tg/2013/09/ttg2013091455/13rRUx0xPTS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/02/ttg2013020319",
"title": "Quartic Box-Spline Reconstruction on the BCC Lattice",
"doi": null,
"abstractUrl": "/journal/tg/2013/02/ttg2013020319/13rRUxC0SvT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/02/ttg2008020289",
"title": "An Evaluation of Prefiltered Reconstruction Schemes for Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2008/02/ttg2008020289/13rRUxYIMUR",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/02/ttg2008020313",
"title": "Practical Box Splines for Reconstruction on the Body Centered Cubic Lattice",
"doi": null,
"abstractUrl": "/journal/tg/2008/02/ttg2008020313/13rRUxZRbnW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/06/ttg2008061523",
"title": "Box Spline Reconstruction On The Face-Centered Cubic Lattice",
"doi": null,
"abstractUrl": "/journal/tg/2008/06/ttg2008061523/13rRUy0qnLC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv-2/2019/2850/0/285000a156",
"title": "Cubic B-Spline Curve Interpolation with Arbitrary Derivatives on its Data Points",
"doi": null,
"abstractUrl": "/proceedings-article/iv-2/2019/285000a156/1cMEQEhYBC8",
"parentPublication": {
"id": "proceedings/iv-2/2019/2850/0",
"title": "2019 23rd International Conference in Information Visualization – Part II",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2010030484",
"articleId": "13rRUwI5TQU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2010030513",
"articleId": "13rRUygT7mQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNqESuig",
"title": "May/June",
"year": "2010",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "16",
"label": "May/June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygT7mQ",
"doi": "10.1109/TVCG.2009.90",
"abstract": "The paper investigates the novel concept of local-error control in mesh geometry encoding. In contrast to traditional mesh-coding systems that use the mean-square error as target distortion metric, this paper proposes a new L-infinite mesh-coding approach, for which the target distortion metric is the L-infinite distortion. In this context, a novel wavelet-based L-infinite-constrained coding approach for meshes is proposed, which ensures that the maximum error between the vertex positions in the original and decoded meshes is lower than a given upper bound. Furthermore, the proposed system achieves scalability in L-infinite sense, that is, any decoding of the input stream will correspond to a perfectly predictable L-infinite distortion upper bound. An instantiation of the proposed L-infinite-coding approach is demonstrated for MESHGRID, which is a scalable 3D object encoding system, part of MPEG-4 AFX. In this context, the advantages of scalable L-infinite coding over L-2-oriented coding are experimentally demonstrated. One concludes that the proposed L-infinite mesh-coding approach guarantees an upper bound on the local error in the decoded mesh, it enables a fast real-time implementation of the rate allocation, and it preserves all the scalability features and animation capabilities of the employed scalable mesh codec.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The paper investigates the novel concept of local-error control in mesh geometry encoding. In contrast to traditional mesh-coding systems that use the mean-square error as target distortion metric, this paper proposes a new L-infinite mesh-coding approach, for which the target distortion metric is the L-infinite distortion. In this context, a novel wavelet-based L-infinite-constrained coding approach for meshes is proposed, which ensures that the maximum error between the vertex positions in the original and decoded meshes is lower than a given upper bound. Furthermore, the proposed system achieves scalability in L-infinite sense, that is, any decoding of the input stream will correspond to a perfectly predictable L-infinite distortion upper bound. An instantiation of the proposed L-infinite-coding approach is demonstrated for MESHGRID, which is a scalable 3D object encoding system, part of MPEG-4 AFX. In this context, the advantages of scalable L-infinite coding over L-2-oriented coding are experimentally demonstrated. One concludes that the proposed L-infinite mesh-coding approach guarantees an upper bound on the local error in the decoded mesh, it enables a fast real-time implementation of the rate allocation, and it preserves all the scalability features and animation capabilities of the employed scalable mesh codec.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The paper investigates the novel concept of local-error control in mesh geometry encoding. In contrast to traditional mesh-coding systems that use the mean-square error as target distortion metric, this paper proposes a new L-infinite mesh-coding approach, for which the target distortion metric is the L-infinite distortion. In this context, a novel wavelet-based L-infinite-constrained coding approach for meshes is proposed, which ensures that the maximum error between the vertex positions in the original and decoded meshes is lower than a given upper bound. Furthermore, the proposed system achieves scalability in L-infinite sense, that is, any decoding of the input stream will correspond to a perfectly predictable L-infinite distortion upper bound. An instantiation of the proposed L-infinite-coding approach is demonstrated for MESHGRID, which is a scalable 3D object encoding system, part of MPEG-4 AFX. In this context, the advantages of scalable L-infinite coding over L-2-oriented coding are experimentally demonstrated. One concludes that the proposed L-infinite mesh-coding approach guarantees an upper bound on the local error in the decoded mesh, it enables a fast real-time implementation of the rate allocation, and it preserves all the scalability features and animation capabilities of the employed scalable mesh codec.",
"title": "Scalable L-Infinite Coding of Meshes",
"normalizedTitle": "Scalable L-Infinite Coding of Meshes",
"fno": "ttg2010030513",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"L Infinite Coding",
"L 2 Coding",
"Scalable Mesh Coding",
"MESHGRID",
"3 D Graphics",
"MPEG 4 AFX",
"1 CPRS"
],
"authors": [
{
"givenName": "Adrian",
"surname": "Munteanu",
"fullName": "Adrian Munteanu",
"affiliation": "Vrije Universiteit Brussel, Brussels",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dan C.",
"surname": "Cernea",
"fullName": "Dan C. Cernea",
"affiliation": "Vrije Universiteit Brussel, Brussels",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alin",
"surname": "Alecu",
"fullName": "Alin Alecu",
"affiliation": "Vrije Universiteit Brussel, Brussels",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jan",
"surname": "Cornelis",
"fullName": "Jan Cornelis",
"affiliation": "Vrije Universiteit Brussel, Brussels",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Schelkens",
"fullName": "Peter Schelkens",
"affiliation": "Vrije Universiteit Brussel, Brussels",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2010-05-01 00:00:00",
"pubType": "trans",
"pages": "513-528",
"year": "2010",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2012/2027/0/06266228",
"title": "L-infinite Coding of 3D Representations of Human Affect",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2012/06266228/12OmNAqCtKW",
"parentPublication": {
"id": "proceedings/icmew/2012/2027/0",
"title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icn/2010/3979/0/3979a356",
"title": "A Novel Bit-Plane Shifting Algorithm for Scalable Audio Coding",
"doi": null,
"abstractUrl": "/proceedings-article/icn/2010/3979a356/12OmNBUS7bI",
"parentPublication": {
"id": "proceedings/icn/2010/3979/0",
"title": "International Conference on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/1/73100109",
"title": "Vector quantization of images using the L/sub /spl infin// distortion measure",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100109/12OmNBpVQ8R",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2008/3121/0/3121a529",
"title": "A Peer-to-Peer Architecture Based on Scalable Video Coding",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2008/3121a529/12OmNs0C9Vz",
"parentPublication": {
"id": "proceedings/dcc/2008/3121/0",
"title": "2008 Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/3/81833094",
"title": "Object-Scalable Mesh-Based Coding of Synthetic and Natural Image Objects",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81833094/12OmNvA1hFX",
"parentPublication": {
"id": "proceedings/icip/1997/8183/3",
"title": "Proceedings of International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2008/3382/3/3382c594",
"title": "Efficient Scalable Distributed Video Coding Based on Residual SW-SPIHT",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2008/3382c594/12OmNy6HQTl",
"parentPublication": {
"id": "proceedings/isda/2008/3382/3",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1995/7012/0/70120381",
"title": "A new approach to scalable video coding",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1995/70120381/12OmNyvoXjz",
"parentPublication": {
"id": "proceedings/dcc/1995/7012/0",
"title": "Proceedings DCC '95 Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/2/81832061",
"title": "Evaluation of temporally scalable video coding techniques",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81832061/12OmNzBOi66",
"parentPublication": {
"id": "proceedings/icip/1997/8183/2",
"title": "Proceedings of International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nas/2009/3741/0/3741a239",
"title": "An Approach of Scalable MPEG-4 Video Bitstreams with Network Coding for P2P Swarming System",
"doi": null,
"abstractUrl": "/proceedings-article/nas/2009/3741a239/12OmNzFdtaX",
"parentPublication": {
"id": "proceedings/nas/2009/3741/0",
"title": "2009 IEEE International Conference on Networking, Architecture, and Storage",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607580",
"title": "Scalable audio coding using watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607580/12OmNzayNyW",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2010030499",
"articleId": "13rRUEgarBn",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNALlciD",
"title": "July-September",
"year": "2000",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "6",
"label": "July-September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwdrdSo",
"doi": "10.1109/TVCG.2000.10001",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Editor's Note",
"normalizedTitle": "Editor's Note",
"fno": "v0193",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Hans",
"surname": "Hagen",
"fullName": "Hans Hagen",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2000-07-01 00:00:00",
"pubType": "trans",
"pages": "193-195",
"year": "2000",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "v0196",
"articleId": "13rRUyYSWkS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNALlciD",
"title": "July-September",
"year": "2000",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "6",
"label": "July-September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYSWkS",
"doi": "10.1109/2945.879782",
"abstract": "Abstract—Volume renderers for interactive analysis must be sufficiently versatile to render a broad range of volume images: unsegmented “raw” images as recorded by a 3D scanner, labeled segmented images, multimodality images, or any combination of these. The usual strategy is to assign to each voxel a three component RGB color and an opacity value Z_$\\alpha$_Z. This so-called RGBZ_$\\alpha$_Z approach offers the possibility of distinguishing volume objects by color. However, these colors are connected to the objects themselves, thereby bypassing the idea that in reality the color of an object is also determined by the light source and light detectors c.q. human eyes. The physically realistic approach presented here models light interacting with the materials inside a voxel causing spectral changes in the light. The radiated spectrum falls upon a set of RGB detectors. The spectral approach is investigated to see whether it could enhance the visualization of volume data and interactive tools. For that purpose, a material is split into an absorbing part (the medium) and a scattering part (small particles). The medium is considered to be either achromatic or chromatic, while the particles are considered to scatter the light achromatically, elastically, or inelastically. It appears that inelastic scattering particles combined with an achromatic absorbing medium offer additional visual features: Objects are made visible through the surface structure of a surrounding volume object and volume and surface structures can be made visible at the same time. With one or two materials the method is faster than the RGBZ_$\\alpha$_Z approach, with three materials the performance is equal. The spectral approach can be considered as an extension of the RGBZ_$\\alpha$_Z approach with a greater visual flexibility and a better balance between quality and speed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Volume renderers for interactive analysis must be sufficiently versatile to render a broad range of volume images: unsegmented “raw” images as recorded by a 3D scanner, labeled segmented images, multimodality images, or any combination of these. The usual strategy is to assign to each voxel a three component RGB color and an opacity value $\\alpha$. This so-called RGB$\\alpha$ approach offers the possibility of distinguishing volume objects by color. However, these colors are connected to the objects themselves, thereby bypassing the idea that in reality the color of an object is also determined by the light source and light detectors c.q. human eyes. The physically realistic approach presented here models light interacting with the materials inside a voxel causing spectral changes in the light. The radiated spectrum falls upon a set of RGB detectors. The spectral approach is investigated to see whether it could enhance the visualization of volume data and interactive tools. For that purpose, a material is split into an absorbing part (the medium) and a scattering part (small particles). The medium is considered to be either achromatic or chromatic, while the particles are considered to scatter the light achromatically, elastically, or inelastically. It appears that inelastic scattering particles combined with an achromatic absorbing medium offer additional visual features: Objects are made visible through the surface structure of a surrounding volume object and volume and surface structures can be made visible at the same time. With one or two materials the method is faster than the RGB$\\alpha$ approach, with three materials the performance is equal. The spectral approach can be considered as an extension of the RGB$\\alpha$ approach with a greater visual flexibility and a better balance between quality and speed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Volume renderers for interactive analysis must be sufficiently versatile to render a broad range of volume images: unsegmented “raw” images as recorded by a 3D scanner, labeled segmented images, multimodality images, or any combination of these. The usual strategy is to assign to each voxel a three component RGB color and an opacity value -. This so-called RGB- approach offers the possibility of distinguishing volume objects by color. However, these colors are connected to the objects themselves, thereby bypassing the idea that in reality the color of an object is also determined by the light source and light detectors c.q. human eyes. The physically realistic approach presented here models light interacting with the materials inside a voxel causing spectral changes in the light. The radiated spectrum falls upon a set of RGB detectors. The spectral approach is investigated to see whether it could enhance the visualization of volume data and interactive tools. For that purpose, a material is split into an absorbing part (the medium) and a scattering part (small particles). The medium is considered to be either achromatic or chromatic, while the particles are considered to scatter the light achromatically, elastically, or inelastically. It appears that inelastic scattering particles combined with an achromatic absorbing medium offer additional visual features: Objects are made visible through the surface structure of a surrounding volume object and volume and surface structures can be made visible at the same time. With one or two materials the method is faster than the RGB- approach, with three materials the performance is equal. The spectral approach can be considered as an extension of the RGB- approach with a greater visual flexibility and a better balance between quality and speed.",
"title": "Spectral Volume Rendering",
"normalizedTitle": "Spectral Volume Rendering",
"fno": "v0196",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Volume Rendering",
"Light Matter Interaction",
"Light Spectra",
"Physical Realism",
"Visual Cues"
],
"authors": [
{
"givenName": "Herke Jan",
"surname": "Noordmans",
"fullName": "Herke Jan Noordmans",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hans T.M.",
"surname": "van der Voort",
"fullName": "Hans T.M. van der Voort",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Arnold W.M.",
"surname": "Smeulders",
"fullName": "Arnold W.M. Smeulders",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2000-07-01 00:00:00",
"pubType": "trans",
"pages": "196-207",
"year": "2000",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0193",
"articleId": "13rRUwdrdSo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0208",
"articleId": "13rRUyfKIHx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNALlciD",
"title": "July-September",
"year": "2000",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "6",
"label": "July-September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyfKIHx",
"doi": "10.1109/2945.879783",
"abstract": "Abstract—Analyzing the accessibility of an object's surface to probes or tools is important for many planning and programming tasks that involve spatial reasoning and arise in robotics and automation. This paper presents novel and efficient algorithms for computing accessible directions for tactile probes used in 3D digitization with Coordinate Measuring Machines. The algorithms are executed in standard computer graphics hardware. They are a nonobvious application of rendering hardware to scientific and technological areas beyond computer graphics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Analyzing the accessibility of an object's surface to probes or tools is important for many planning and programming tasks that involve spatial reasoning and arise in robotics and automation. This paper presents novel and efficient algorithms for computing accessible directions for tactile probes used in 3D digitization with Coordinate Measuring Machines. The algorithms are executed in standard computer graphics hardware. They are a nonobvious application of rendering hardware to scientific and technological areas beyond computer graphics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Analyzing the accessibility of an object's surface to probes or tools is important for many planning and programming tasks that involve spatial reasoning and arise in robotics and automation. This paper presents novel and efficient algorithms for computing accessible directions for tactile probes used in 3D digitization with Coordinate Measuring Machines. The algorithms are executed in standard computer graphics hardware. They are a nonobvious application of rendering hardware to scientific and technological areas beyond computer graphics.",
"title": "Accessibility Analysis Using Computer Graphics Hardware",
"normalizedTitle": "Accessibility Analysis Using Computer Graphics Hardware",
"fno": "v0208",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Accessibility Analysis",
"Dimensional Inspection Planning",
"Coordinate Measuring Machines",
"Direction Cones",
"Configuration Space",
"Spatial Reasoning",
"CAD CAM",
"Rasterizing Computer Graphics Hardware",
"Visibility",
"Visual Inspection"
],
"authors": [
{
"givenName": "Steven N.",
"surname": "Spitz",
"fullName": "Steven N. Spitz",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aristides A.G.",
"surname": "Requicha",
"fullName": "Aristides A.G. Requicha",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2000-07-01 00:00:00",
"pubType": "trans",
"pages": "208-219",
"year": "2000",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0196",
"articleId": "13rRUyYSWkS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0220",
"articleId": "13rRUxBa5rJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNALlciD",
"title": "July-September",
"year": "2000",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "6",
"label": "July-September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa5rJ",
"doi": "10.1109/2945.879784",
"abstract": "Abstract—We describe a novel algorithm for computing view-independent finite-element radiosity solutions on distributed shared-memory parallel architectures. Our approach is based on the notion of a subiteration being the transfer of energy from a single source to a subset of the scene's receiver patches. By using an efficient queue-based scheduling system to process these subiterations, we show how radiosity solutions can be generated without the need for processor synchronization between iterations of the progressive refinement algorithm. The only significant source of interprocessor communication required by our method is for visibility calculations. We also describe a perceptually-driven approach to visibility estimation, which employs an efficient volumetric grid structure and attempts to reduce the amount of interprocessor communication by approximating visibility queries between distant patches. Our algorithm also eliminates the need for dynamic load-balancing until the end of the solution process and is shown to achieve a super-linear speedup in many situations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—We describe a novel algorithm for computing view-independent finite-element radiosity solutions on distributed shared-memory parallel architectures. Our approach is based on the notion of a subiteration being the transfer of energy from a single source to a subset of the scene's receiver patches. By using an efficient queue-based scheduling system to process these subiterations, we show how radiosity solutions can be generated without the need for processor synchronization between iterations of the progressive refinement algorithm. The only significant source of interprocessor communication required by our method is for visibility calculations. We also describe a perceptually-driven approach to visibility estimation, which employs an efficient volumetric grid structure and attempts to reduce the amount of interprocessor communication by approximating visibility queries between distant patches. Our algorithm also eliminates the need for dynamic load-balancing until the end of the solution process and is shown to achieve a super-linear speedup in many situations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—We describe a novel algorithm for computing view-independent finite-element radiosity solutions on distributed shared-memory parallel architectures. Our approach is based on the notion of a subiteration being the transfer of energy from a single source to a subset of the scene's receiver patches. By using an efficient queue-based scheduling system to process these subiterations, we show how radiosity solutions can be generated without the need for processor synchronization between iterations of the progressive refinement algorithm. The only significant source of interprocessor communication required by our method is for visibility calculations. We also describe a perceptually-driven approach to visibility estimation, which employs an efficient volumetric grid structure and attempts to reduce the amount of interprocessor communication by approximating visibility queries between distant patches. Our algorithm also eliminates the need for dynamic load-balancing until the end of the solution process and is shown to achieve a super-linear speedup in many situations.",
"title": "A Perceptually-Driven Parallel Algorithm for Efficient Radiosity Simulation",
"normalizedTitle": "A Perceptually-Driven Parallel Algorithm for Efficient Radiosity Simulation",
"fno": "v0220",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Radiosity",
"Progressive Refinement",
"Parallelism",
"Distributed Shared Memory",
"Load Balancing",
"Tone Reproduction",
"Visibility"
],
"authors": [
{
"givenName": "Simon",
"surname": "Gibson",
"fullName": "Simon Gibson",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Roger J.",
"surname": "Hubbold",
"fullName": "Roger J. Hubbold",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2000-07-01 00:00:00",
"pubType": "trans",
"pages": "220-235",
"year": "2000",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0208",
"articleId": "13rRUyfKIHx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0236",
"articleId": "13rRUwIF6dC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNALlciD",
"title": "July-September",
"year": "2000",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "6",
"label": "July-September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwIF6dC",
"doi": "10.1109/2945.879785",
"abstract": "Abstract—This paper discusses and experimentally compares distance-based acceleration algorithms for ray-tracing of volumetric data with an emphasis on the Chessboard Distance (CD) voxel traversal. The acceleration of this class of algorithms is achieved by skipping empty macro regions, which are defined for each background voxel of the volume. Background voxels are labeled in a preprocessing phase by a value, defining the macro region size, which is equal to the voxel distance to the nearest foreground voxel. The CD algorithm exploits the chessboard distance and defines the ray as a nonuniform sequence of samples positioned at voxel faces. This feature assures that no foreground voxels are missed during the scene traversal. Further, due to parallelepipedal shape of the macro region, it supports accelerated visualization of cubic, regular, and rectilinear grids. The CD algorithm is suitable for all modifications of the ray tracing/ray casting techniques being used in volume visualization and volume graphics. However, when used for rendering based on local surface interpolation, it also enables fast search of intersections between rays and the interpolated surface, further improving speed of the process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—This paper discusses and experimentally compares distance-based acceleration algorithms for ray-tracing of volumetric data with an emphasis on the Chessboard Distance (CD) voxel traversal. The acceleration of this class of algorithms is achieved by skipping empty macro regions, which are defined for each background voxel of the volume. Background voxels are labeled in a preprocessing phase by a value, defining the macro region size, which is equal to the voxel distance to the nearest foreground voxel. The CD algorithm exploits the chessboard distance and defines the ray as a nonuniform sequence of samples positioned at voxel faces. This feature assures that no foreground voxels are missed during the scene traversal. Further, due to parallelepipedal shape of the macro region, it supports accelerated visualization of cubic, regular, and rectilinear grids. The CD algorithm is suitable for all modifications of the ray tracing/ray casting techniques being used in volume visualization and volume graphics. However, when used for rendering based on local surface interpolation, it also enables fast search of intersections between rays and the interpolated surface, further improving speed of the process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—This paper discusses and experimentally compares distance-based acceleration algorithms for ray-tracing of volumetric data with an emphasis on the Chessboard Distance (CD) voxel traversal. The acceleration of this class of algorithms is achieved by skipping empty macro regions, which are defined for each background voxel of the volume. Background voxels are labeled in a preprocessing phase by a value, defining the macro region size, which is equal to the voxel distance to the nearest foreground voxel. The CD algorithm exploits the chessboard distance and defines the ray as a nonuniform sequence of samples positioned at voxel faces. This feature assures that no foreground voxels are missed during the scene traversal. Further, due to parallelepipedal shape of the macro region, it supports accelerated visualization of cubic, regular, and rectilinear grids. The CD algorithm is suitable for all modifications of the ray tracing/ray casting techniques being used in volume visualization and volume graphics. However, when used for rendering based on local surface interpolation, it also enables fast search of intersections between rays and the interpolated surface, further improving speed of the process.",
"title": "Fast Ray-Tracing of Rectilinear Volume Data Using Distance Transforms",
"normalizedTitle": "Fast Ray-Tracing of Rectilinear Volume Data Using Distance Transforms",
"fno": "v0236",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Volume Visualization",
"Volume Graphics",
"Volume Rendering",
"Distance Transforms",
"Macro Region",
"Voxel Traversal",
"Speed Up Techniques",
"Subvoxel Precision"
],
"authors": [
{
"givenName": "Milos",
"surname": "Sramek",
"fullName": "Milos Sramek",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Arie",
"surname": "Kaufman",
"fullName": "Arie Kaufman",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2000-07-01 00:00:00",
"pubType": "trans",
"pages": "236-252",
"year": "2000",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0220",
"articleId": "13rRUxBa5rJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0253",
"articleId": "13rRUwj7coZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNALlciD",
"title": "July-September",
"year": "2000",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "6",
"label": "July-September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwj7coZ",
"doi": "10.1109/2945.879786",
"abstract": "Abstract—We describe a new approach for interactively approximating specular reflections in arbitrary curved surfaces. The technique is applicable to any smooth implicitly defined reflecting surface that is equipped with a ray intersection procedure; it is also extremely efficient as it employs local perturbations to interpolate point samples analytically. After ray tracing a sparse set of reflection paths with respect to a given vantage point and static reflecting surfaces, the algorithm rapidly approximates reflections of arbitrary points in 3-space by expressing them as perturbations of nearby points with known reflections. The reflection of each new point is approximated to second-order accuracy by applying a closed-form perturbation formula to one or more nearby reflection paths. This formula is derived from the Taylor expansion of a reflection path and is based on first and second-order path derivatives. After preprocessing, the approach is fast enough to compute reflections of tessellated diffuse objects in arbitrary curved surfaces at interactive rates using standard graphics hardware. The resulting images are nearly indistinguishable from ray traced images that take several orders of magnitude longer to generate.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—We describe a new approach for interactively approximating specular reflections in arbitrary curved surfaces. The technique is applicable to any smooth implicitly defined reflecting surface that is equipped with a ray intersection procedure; it is also extremely efficient as it employs local perturbations to interpolate point samples analytically. After ray tracing a sparse set of reflection paths with respect to a given vantage point and static reflecting surfaces, the algorithm rapidly approximates reflections of arbitrary points in 3-space by expressing them as perturbations of nearby points with known reflections. The reflection of each new point is approximated to second-order accuracy by applying a closed-form perturbation formula to one or more nearby reflection paths. This formula is derived from the Taylor expansion of a reflection path and is based on first and second-order path derivatives. After preprocessing, the approach is fast enough to compute reflections of tessellated diffuse objects in arbitrary curved surfaces at interactive rates using standard graphics hardware. The resulting images are nearly indistinguishable from ray traced images that take several orders of magnitude longer to generate.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—We describe a new approach for interactively approximating specular reflections in arbitrary curved surfaces. The technique is applicable to any smooth implicitly defined reflecting surface that is equipped with a ray intersection procedure; it is also extremely efficient as it employs local perturbations to interpolate point samples analytically. After ray tracing a sparse set of reflection paths with respect to a given vantage point and static reflecting surfaces, the algorithm rapidly approximates reflections of arbitrary points in 3-space by expressing them as perturbations of nearby points with known reflections. The reflection of each new point is approximated to second-order accuracy by applying a closed-form perturbation formula to one or more nearby reflection paths. This formula is derived from the Taylor expansion of a reflection path and is based on first and second-order path derivatives. After preprocessing, the approach is fast enough to compute reflections of tessellated diffuse objects in arbitrary curved surfaces at interactive rates using standard graphics hardware. The resulting images are nearly indistinguishable from ray traced images that take several orders of magnitude longer to generate.",
"title": "Perturbation Methods for Interactive Specular Reflections",
"normalizedTitle": "Perturbation Methods for Interactive Specular Reflections",
"fno": "v0253",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Animation Systems",
"Illumination Effects",
"Implicit Surfaces",
"Matting And Compositing",
"Optics",
"Ray Tracing"
],
"authors": [
{
"givenName": "Min",
"surname": "Chen",
"fullName": "Min Chen",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "James",
"surname": "Arvo",
"fullName": "James Arvo",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2000-07-01 00:00:00",
"pubType": "trans",
"pages": "253-264",
"year": "2000",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0236",
"articleId": "13rRUwIF6dC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0265",
"articleId": "13rRUyoPSOV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNALlciD",
"title": "July-September",
"year": "2000",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "6",
"label": "July-September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyoPSOV",
"doi": "10.1109/2945.879787",
"abstract": "Abstract—In this paper, we develop integrated techniques that unify physics-based modeling with geometric subdivision methodology and present a scheme for dynamic manipulation of the smooth limit surface generated by the (modified) butterfly scheme using physics-based “force” tools. This procedure-based surface model obtained through butterfly subdivision does not have a closed-form analytic formulation (unlike other well-known spline-based models) and, hence, poses challenging problems to incorporate mass and damping distributions, internal deformation energy, forces, and other physical quantities required to develop a physics-based model. Our primary contributions to computer graphics and geometric modeling include: 1) a new hierarchical formulation for locally parameterizing the butterfly subdivision surface over its initial control polyhedron, 2) formulation of dynamic butterfly subdivision surface as a set of novel finite elements, and 3) approximation of this new type of finite elements by a collection of existing finite elements subject to implicit geometric constraints. Our new physics-based model can be sculpted directly by applying synthesized forces and its equilibrium is characterized by the minimum of a deformation energy subject to the imposed constraints. We demonstrate that this novel dynamic framework not only provides a direct and natural means of manipulating geometric shapes, but also facilitates hierarchical shape and nonrigid motion estimation from large range and volumetric data sets using very few degrees of freedom (control vertices that define the initial polyhedron).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—In this paper, we develop integrated techniques that unify physics-based modeling with geometric subdivision methodology and present a scheme for dynamic manipulation of the smooth limit surface generated by the (modified) butterfly scheme using physics-based “force” tools. This procedure-based surface model obtained through butterfly subdivision does not have a closed-form analytic formulation (unlike other well-known spline-based models) and, hence, poses challenging problems to incorporate mass and damping distributions, internal deformation energy, forces, and other physical quantities required to develop a physics-based model. Our primary contributions to computer graphics and geometric modeling include: 1) a new hierarchical formulation for locally parameterizing the butterfly subdivision surface over its initial control polyhedron, 2) formulation of dynamic butterfly subdivision surface as a set of novel finite elements, and 3) approximation of this new type of finite elements by a collection of existing finite elements subject to implicit geometric constraints. Our new physics-based model can be sculpted directly by applying synthesized forces and its equilibrium is characterized by the minimum of a deformation energy subject to the imposed constraints. We demonstrate that this novel dynamic framework not only provides a direct and natural means of manipulating geometric shapes, but also facilitates hierarchical shape and nonrigid motion estimation from large range and volumetric data sets using very few degrees of freedom (control vertices that define the initial polyhedron).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—In this paper, we develop integrated techniques that unify physics-based modeling with geometric subdivision methodology and present a scheme for dynamic manipulation of the smooth limit surface generated by the (modified) butterfly scheme using physics-based “force” tools. This procedure-based surface model obtained through butterfly subdivision does not have a closed-form analytic formulation (unlike other well-known spline-based models) and, hence, poses challenging problems to incorporate mass and damping distributions, internal deformation energy, forces, and other physical quantities required to develop a physics-based model. Our primary contributions to computer graphics and geometric modeling include: 1) a new hierarchical formulation for locally parameterizing the butterfly subdivision surface over its initial control polyhedron, 2) formulation of dynamic butterfly subdivision surface as a set of novel finite elements, and 3) approximation of this new type of finite elements by a collection of existing finite elements subject to implicit geometric constraints. Our new physics-based model can be sculpted directly by applying synthesized forces and its equilibrium is characterized by the minimum of a deformation energy subject to the imposed constraints. We demonstrate that this novel dynamic framework not only provides a direct and natural means of manipulating geometric shapes, but also facilitates hierarchical shape and nonrigid motion estimation from large range and volumetric data sets using very few degrees of freedom (control vertices that define the initial polyhedron).",
"title": "Dynamic Modeling of Butterfly Subdivision Surfaces",
"normalizedTitle": "Dynamic Modeling of Butterfly Subdivision Surfaces",
"fno": "v0265",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Dynamic Modeling",
"Physics Based Geometric Design",
"Geometric Modeling",
"CAGD",
"Subdivision Surfaces",
"Deformable Models",
"Finite Elements",
"Interactive Techniques"
],
"authors": [
{
"givenName": "Chhandomay",
"surname": "Mandal",
"fullName": "Chhandomay Mandal",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hong",
"surname": "Qin",
"fullName": "Hong Qin",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Baba C.",
"surname": "Vemuri",
"fullName": "Baba C. Vemuri",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2000-07-01 00:00:00",
"pubType": "trans",
"pages": "265-287",
"year": "2000",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0253",
"articleId": "13rRUwj7coZ",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzXnNEo",
"title": "September",
"year": "1995",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxAATgp",
"doi": "10.1109/TVCG.1995.10002",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "In Memoriam: Dr. Wolfgang Krueger",
"normalizedTitle": "In Memoriam: Dr. Wolfgang Krueger",
"fno": "v0209",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Pat",
"surname": "Hanrahan",
"fullName": "Pat Hanrahan",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "1995-07-01 00:00:00",
"pubType": "trans",
"pages": "209",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "v0210",
"articleId": "13rRUwdIOUy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzXnNEo",
"title": "September",
"year": "1995",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwdIOUy",
"doi": "10.1109/2945.466716",
"abstract": "Abstract—Particle path computation in unsteady 3D vector fields given in discrete, structured form (i.e., as a hexahedral curvilinear grid) requires the local approximation of the vector field and the path. Quadrilinear interpolation and Bernstein-Bézier polynomials are used for the local vector field and path approximation. The next point in a sequence of points on a particle path is computed using this local approximation. Bernstein-Bézier polynomials are primarily used in geometric modeling, and their properties allow direct computation of points on a particle path.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Particle path computation in unsteady 3D vector fields given in discrete, structured form (i.e., as a hexahedral curvilinear grid) requires the local approximation of the vector field and the path. Quadrilinear interpolation and Bernstein-Bézier polynomials are used for the local vector field and path approximation. The next point in a sequence of points on a particle path is computed using this local approximation. Bernstein-Bézier polynomials are primarily used in geometric modeling, and their properties allow direct computation of points on a particle path.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Particle path computation in unsteady 3D vector fields given in discrete, structured form (i.e., as a hexahedral curvilinear grid) requires the local approximation of the vector field and the path. Quadrilinear interpolation and Bernstein-Bézier polynomials are used for the local vector field and path approximation. The next point in a sequence of points on a particle path is computed using this local approximation. Bernstein-Bézier polynomials are primarily used in geometric modeling, and their properties allow direct computation of points on a particle path.",
"title": "On Particle Path Generation Based on Quadrilinear Interpolation and Bernstein-Bézier Polynomials",
"normalizedTitle": "On Particle Path Generation Based on Quadrilinear Interpolation and Bernstein-Bézier Polynomials",
"fno": "v0210",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Approximation",
"Bernstein Bezier Polynomial",
"Particle Path",
"Curvilinear Grid",
"Path Line",
"Scientific Visualization",
"Structured Grid",
"Trajectory",
"Vector Field"
],
"authors": [
{
"givenName": "Bernd",
"surname": "Hamann",
"fullName": "Bernd Hamann",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Donghua",
"surname": "Wu",
"fullName": "Donghua Wu",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert J.",
"surname": "Moorhead II",
"fullName": "Robert J. Moorhead II",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "1995-07-01 00:00:00",
"pubType": "trans",
"pages": "210-217",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0209",
"articleId": "13rRUxAATgp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0218",
"articleId": "13rRUxBJhFh",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzXnNEo",
"title": "September",
"year": "1995",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBJhFh",
"doi": "10.1109/2945.466717",
"abstract": "Abstract—Collision detection and response are important for interactive graphics applications such as vehicle simulators and virtual reality. Unfortunately, previous collision-detection algorithms are too slow for interactive use. This paper presents a new algorithm for rigid or articulated objects that meets performance goals through a form of time-critical computing. The algorithm supports progressive refinement, detecting collisions between successively tighter approximations to object surfaces as the application allows it more processing time. The algorithm uses simple four-dimensional geometry to approximate motion, and hierarchies of spheres to approximate three-dimensional surfaces at multiple resolutions. In a sample application, the algorithm allows interactive performance that is not possible with a good previous algorithm. In particular, the new algorithm provides acceptable accuracy while maintaining a steady and high frame rate, which in some cases improves on the previous algorithm’s rate by more than two orders of magnitude.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Collision detection and response are important for interactive graphics applications such as vehicle simulators and virtual reality. Unfortunately, previous collision-detection algorithms are too slow for interactive use. This paper presents a new algorithm for rigid or articulated objects that meets performance goals through a form of time-critical computing. The algorithm supports progressive refinement, detecting collisions between successively tighter approximations to object surfaces as the application allows it more processing time. The algorithm uses simple four-dimensional geometry to approximate motion, and hierarchies of spheres to approximate three-dimensional surfaces at multiple resolutions. In a sample application, the algorithm allows interactive performance that is not possible with a good previous algorithm. In particular, the new algorithm provides acceptable accuracy while maintaining a steady and high frame rate, which in some cases improves on the previous algorithm’s rate by more than two orders of magnitude.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Collision detection and response are important for interactive graphics applications such as vehicle simulators and virtual reality. Unfortunately, previous collision-detection algorithms are too slow for interactive use. This paper presents a new algorithm for rigid or articulated objects that meets performance goals through a form of time-critical computing. The algorithm supports progressive refinement, detecting collisions between successively tighter approximations to object surfaces as the application allows it more processing time. The algorithm uses simple four-dimensional geometry to approximate motion, and hierarchies of spheres to approximate three-dimensional surfaces at multiple resolutions. In a sample application, the algorithm allows interactive performance that is not possible with a good previous algorithm. In particular, the new algorithm provides acceptable accuracy while maintaining a steady and high frame rate, which in some cases improves on the previous algorithm’s rate by more than two orders of magnitude.",
"title": "Collision Detection for Interactive Graphics Applications",
"normalizedTitle": "Collision Detection for Interactive Graphics Applications",
"fno": "v0218",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Collision Detection",
"Time Critical Computing",
"Real Time Performance",
"Interaction",
"Four Dimensions",
"Approximation"
],
"authors": [
{
"givenName": "Philip M.",
"surname": "Hubbard",
"fullName": "Philip M. Hubbard",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "1995-07-01 00:00:00",
"pubType": "trans",
"pages": "218-230",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0210",
"articleId": "13rRUwdIOUy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0231",
"articleId": "13rRUwbJD4B",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzXnNEo",
"title": "September",
"year": "1995",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwbJD4B",
"doi": "10.1109/2945.466718",
"abstract": "Abstract—A line-art nonphotorealistic rendering scheme of scenes composed of freeform surfaces is presented. A freeform surface coverage is constructed using a set of isoparametric curves. The density of the isoparametric curves is set to be a function of the illumination of the surface determined using a simple shading model, or of regions of special importance such as silhouettes. The outcome is one way of achieving an aesthetic and attractive line-art rendering that employs isoparametric curve based drawings that is suitable for printing publication.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—A line-art nonphotorealistic rendering scheme of scenes composed of freeform surfaces is presented. A freeform surface coverage is constructed using a set of isoparametric curves. The density of the isoparametric curves is set to be a function of the illumination of the surface determined using a simple shading model, or of regions of special importance such as silhouettes. The outcome is one way of achieving an aesthetic and attractive line-art rendering that employs isoparametric curve based drawings that is suitable for printing publication.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—A line-art nonphotorealistic rendering scheme of scenes composed of freeform surfaces is presented. A freeform surface coverage is constructed using a set of isoparametric curves. The density of the isoparametric curves is set to be a function of the illumination of the surface determined using a simple shading model, or of regions of special importance such as silhouettes. The outcome is one way of achieving an aesthetic and attractive line-art rendering that employs isoparametric curve based drawings that is suitable for printing publication.",
"title": "Line Art Rendering via a Coverage of Isoparametric Curves",
"normalizedTitle": "Line Art Rendering via a Coverage of Isoparametric Curves",
"fno": "v0231",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Sketches",
"Illustrations",
"Line Drawings",
"Freeform Surfaces",
"NURBs",
"Gridless Halftoning",
"Printing"
],
"authors": [
{
"givenName": "Gershon",
"surname": "Elber",
"fullName": "Gershon Elber",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "1995-07-01 00:00:00",
"pubType": "trans",
"pages": "231-239",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0218",
"articleId": "13rRUxBJhFh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0240",
"articleId": "13rRUyYBlgn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzXnNEo",
"title": "September",
"year": "1995",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYBlgn",
"doi": "10.1109/2945.466719",
"abstract": "Abstract—This paper presents a new radiosity algorithm that allows the simultaneous computation of energy exchanges between surface elements, scattering volume distributions, and groups of surfaces, or object clusters. The new technique is based on a hierarchical formulation of the zonal method, and efficiently integrates volumes and surfaces. In particular no initial linking stage is needed, even for inhomogeneous volumes, thanks to the construction of a global spatial hierarchy. An analogy between object clusters and scattering volumes results in a powerful clustering radiosity algorithm, with no initial linking between surfaces and fast computation of average visibility information through a cluster. We show that the accurate distribution of the energy emitted or received at the cluster level can produce even better results than isotropic clustering at a marginal cost. The resulting algorithm is fast and, more importantly, truly progressive as it allows the quick calculation of approximate solutions with a smooth convergence towards very accurate simulations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—This paper presents a new radiosity algorithm that allows the simultaneous computation of energy exchanges between surface elements, scattering volume distributions, and groups of surfaces, or object clusters. The new technique is based on a hierarchical formulation of the zonal method, and efficiently integrates volumes and surfaces. In particular no initial linking stage is needed, even for inhomogeneous volumes, thanks to the construction of a global spatial hierarchy. An analogy between object clusters and scattering volumes results in a powerful clustering radiosity algorithm, with no initial linking between surfaces and fast computation of average visibility information through a cluster. We show that the accurate distribution of the energy emitted or received at the cluster level can produce even better results than isotropic clustering at a marginal cost. The resulting algorithm is fast and, more importantly, truly progressive as it allows the quick calculation of approximate solutions with a smooth convergence towards very accurate simulations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—This paper presents a new radiosity algorithm that allows the simultaneous computation of energy exchanges between surface elements, scattering volume distributions, and groups of surfaces, or object clusters. The new technique is based on a hierarchical formulation of the zonal method, and efficiently integrates volumes and surfaces. In particular no initial linking stage is needed, even for inhomogeneous volumes, thanks to the construction of a global spatial hierarchy. An analogy between object clusters and scattering volumes results in a powerful clustering radiosity algorithm, with no initial linking between surfaces and fast computation of average visibility information through a cluster. We show that the accurate distribution of the energy emitted or received at the cluster level can produce even better results than isotropic clustering at a marginal cost. The resulting algorithm is fast and, more importantly, truly progressive as it allows the quick calculation of approximate solutions with a smooth convergence towards very accurate simulations.",
"title": "A Unified Hierarchical Algorithm for Global Illumination with Scattering Volumes and Object Clusters",
"normalizedTitle": "A Unified Hierarchical Algorithm for Global Illumination with Scattering Volumes and Object Clusters",
"fno": "v0240",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Radiosity",
"Hierarchical Techniques",
"Clustering",
"Visibility",
"Volume Scattering",
"Lighting Simulation",
"Realistic Image Synthesis"
],
"authors": [
{
"givenName": "François X.",
"surname": "Sillion",
"fullName": "François X. Sillion",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "1995-07-01 00:00:00",
"pubType": "trans",
"pages": "240-254",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pvg/1999/5901/0/59010079",
"title": "Overlapping multi-processing and graphics hardware acceleration: performance evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/pvg/1999/59010079/12OmNAoUT4k",
"parentPublication": {
"id": "proceedings/pvg/1999/5901/0",
"title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2006/0224/0/02240151",
"title": "Compensating Indirect Scattering for Immersive and Semi-Immersive Projection Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/02240151/12OmNCgrDcp",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2011/4458/0/4458a474",
"title": "Electromagnetic Scattering from Imperfectly Periodic Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2011/4458a474/12OmNvjQ92O",
"parentPublication": {
"id": "proceedings/nbis/2011/4458/0",
"title": "2011 14th International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/1998/9180/0/91800023",
"title": "Using Distance Maps for Accurate Surface Representation in Sampled Volumes",
"doi": null,
"abstractUrl": "/proceedings-article/vv/1998/91800023/12OmNvlg8pA",
"parentPublication": {
"id": "proceedings/vv/1998/9180/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/1999/0293/0/02930021",
"title": "Automatic Creation of Object Hierarchies for Radiosity Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/pg/1999/02930021/12OmNyjLoPS",
"parentPublication": {
"id": "proceedings/pg/1999/0293/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761892",
"title": "Analysis of subsurface scattering under generic illumination",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761892/12OmNzd7bV9",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/07/06658758",
"title": "Translucent Radiosity: Efficiently Combining Diffuse Inter-Reflection and Subsurface Scattering",
"doi": null,
"abstractUrl": "/journal/tg/2014/07/06658758/13rRUwInvJg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/03/mcg2013030034",
"title": "Estimating Diffusion Parameters from Polarized Spherical-Gradient Illumination",
"doi": null,
"abstractUrl": "/magazine/cg/2013/03/mcg2013030034/13rRUy0qnIX",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f651",
"title": "3DeepCT: Learning Volumetric Scattering Tomography of Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f651/1BmFL2YrszK",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "v0231",
"articleId": "13rRUwbJD4B",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0255",
"articleId": "13rRUILLkDE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzXnNEo",
"title": "September",
"year": "1995",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILLkDE",
"doi": "10.1109/TVCG.1995.1",
"abstract": "Augmented reality entails the use of models and their associated renderings to supplement information in a real scene. In order for this information to be relevant or meaningful, the models must be positioned and displayed in such a way that they blend into the real world in terms of alignments, perspectives, illuminations, etc. For practical reasons the information necessary to obtain this realistic blending cannot be known a priori, and cannot be hard-wired into a system. Instead a number of calibration procedures are necessary so that the location and parameters of each of the system components are known. In this paper we identify the calibration steps necessary to build a computer model of the real world and then, using the monitor-based augmented reality system developed at ECRC (GRASP) as an example, we describe each of the calibration processes. These processes determine the internal parameters of our imaging devices (scan converter, frame grabber, and video camera), as well as the geometric transformations that relate all of the physical objects of the system to a known world coordinate system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented reality entails the use of models and their associated renderings to supplement information in a real scene. In order for this information to be relevant or meaningful, the models must be positioned and displayed in such a way that they blend into the real world in terms of alignments, perspectives, illuminations, etc. For practical reasons the information necessary to obtain this realistic blending cannot be known a priori, and cannot be hard-wired into a system. Instead a number of calibration procedures are necessary so that the location and parameters of each of the system components are known. In this paper we identify the calibration steps necessary to build a computer model of the real world and then, using the monitor-based augmented reality system developed at ECRC (GRASP) as an example, we describe each of the calibration processes. These processes determine the internal parameters of our imaging devices (scan converter, frame grabber, and video camera), as well as the geometric transformations that relate all of the physical objects of the system to a known world coordinate system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented reality entails the use of models and their associated renderings to supplement information in a real scene. In order for this information to be relevant or meaningful, the models must be positioned and displayed in such a way that they blend into the real world in terms of alignments, perspectives, illuminations, etc. For practical reasons the information necessary to obtain this realistic blending cannot be known a priori, and cannot be hard-wired into a system. Instead a number of calibration procedures are necessary so that the location and parameters of each of the system components are known. In this paper we identify the calibration steps necessary to build a computer model of the real world and then, using the monitor-based augmented reality system developed at ECRC (GRASP) as an example, we describe each of the calibration processes. These processes determine the internal parameters of our imaging devices (scan converter, frame grabber, and video camera), as well as the geometric transformations that relate all of the physical objects of the system to a known world coordinate system.",
"title": "Calibration Requirements and Procedures for a Monitor-Based Augmented Reality System",
"normalizedTitle": "Calibration Requirements and Procedures for a Monitor-Based Augmented Reality System",
"fno": "v0255",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Mihran",
"surname": "Tuceryan",
"fullName": "Mihran Tuceryan",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Douglas S.",
"surname": "Greer",
"fullName": "Douglas S. Greer",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ross T.",
"surname": "Whitaker",
"fullName": "Ross T. Whitaker",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "David E.",
"surname": "Breen",
"fullName": "David E. Breen",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chris",
"surname": "Crampton",
"fullName": "Chris Crampton",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eric",
"surname": "Rose",
"fullName": "Eric Rose",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Klaus H.",
"surname": "Ahlers",
"fullName": "Klaus H. Ahlers",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "1995-07-01 00:00:00",
"pubType": "trans",
"pages": "255-273",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a100",
"title": "[POSTER] RGB-D/C-arm Calibration and Application in Medical Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a100/12OmNAfPIP9",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2007/2929/0/29290045",
"title": "A Practical Calibration Method for Multiple Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2007/29290045/12OmNCfSqGb",
"parentPublication": {
"id": "proceedings/icig/2007/2929/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2002/1781/0/17810176",
"title": "Calibration of a Head-Mounted Projective Display for Augmented Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2002/17810176/12OmNvFHfFC",
"parentPublication": {
"id": "proceedings/ismar/2002/1781/0",
"title": "Proceedings. International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1997/7822/0/78220225",
"title": "Calibration of a Structured Light System: A Projective Approach",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1997/78220225/12OmNx76TJI",
"parentPublication": {
"id": "proceedings/cvpr/1997/7822/0",
"title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948417",
"title": "Comprehensive workspace calibration for visuo-haptic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948417/12OmNxIRxTh",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv-motion/2005/2271/1/227110356",
"title": "Requirements for Camera Calibration: Must Accuracy Come with a High Price?",
"doi": null,
"abstractUrl": "/proceedings-article/wacv-motion/2005/227110356/12OmNzSyCbA",
"parentPublication": {
"id": "proceedings/wacv-motion/2005/2271/1",
"title": "Applications of Computer Vision and the IEEE Workshop on Motion and Video Computing, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1991/2470/0/00186575",
"title": "Optimum active array shape calibration",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1991/00186575/12OmNzZWbHr",
"parentPublication": {
"id": "proceedings/acssc/1991/2470/0",
"title": "Conference Record of the Twenty-Fifth Asilomar Conference on Signals, Systems & Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/01/ttg2009010138",
"title": "Calibration, Registration, and Synchronization for High Precision Augmented Reality Haptics",
"doi": null,
"abstractUrl": "/journal/tg/2009/01/ttg2009010138/13rRUyYBlgv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300b497",
"title": "Calibration Wizard: A Guidance System for Camera Calibration Based on Modelling Geometric and Corner Uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300b497/1hVlBp1zhpm",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "v0240",
"articleId": "13rRUyYBlgn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0274",
"articleId": "13rRUxASuSz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzXnNEo",
"title": "September",
"year": "1995",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASuSz",
"doi": "10.1109/TVCG.1995.2",
"abstract": "We present an efficient algorithm for dynamic adaptive color quantization of 24-bit image (video) sequences, important in multimedia applications. Besides producing hi-fidelity 8-bit imagery, our algorithm runs with minimal computational cost and the generated colormaps are robust to small differences in consecutive images. Apart from the two standard color quantization tasks, colormap design and quantizer mapping, our algorithm includes colormap filling—an operation unique to dynamic color quantization. This task solves the problem of screen flicker, a serious problem in dynamic quantization of image sequences, resulting from rapid changes in display of colormaps. Our solution is based on two ideas: including in the current colormap a small set of color representatives from the previous image; assigning representatives to the colormap entries in an order that reduces the difference between contents of equal entries in consecutive colormaps. Our algorithm runs in near real time on medium-range workstations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an efficient algorithm for dynamic adaptive color quantization of 24-bit image (video) sequences, important in multimedia applications. Besides producing hi-fidelity 8-bit imagery, our algorithm runs with minimal computational cost and the generated colormaps are robust to small differences in consecutive images. Apart from the two standard color quantization tasks, colormap design and quantizer mapping, our algorithm includes colormap filling—an operation unique to dynamic color quantization. This task solves the problem of screen flicker, a serious problem in dynamic quantization of image sequences, resulting from rapid changes in display of colormaps. Our solution is based on two ideas: including in the current colormap a small set of color representatives from the previous image; assigning representatives to the colormap entries in an order that reduces the difference between contents of equal entries in consecutive colormaps. Our algorithm runs in near real time on medium-range workstations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an efficient algorithm for dynamic adaptive color quantization of 24-bit image (video) sequences, important in multimedia applications. Besides producing hi-fidelity 8-bit imagery, our algorithm runs with minimal computational cost and the generated colormaps are robust to small differences in consecutive images. Apart from the two standard color quantization tasks, colormap design and quantizer mapping, our algorithm includes colormap filling—an operation unique to dynamic color quantization. This task solves the problem of screen flicker, a serious problem in dynamic quantization of image sequences, resulting from rapid changes in display of colormaps. Our solution is based on two ideas: including in the current colormap a small set of color representatives from the previous image; assigning representatives to the colormap entries in an order that reduces the difference between contents of equal entries in consecutive colormaps. Our algorithm runs in near real time on medium-range workstations.",
"title": "Dynamic Color Quantization of Video Sequences",
"normalizedTitle": "Dynamic Color Quantization of Video Sequences",
"fno": "v0274",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Evgeny",
"surname": "Roytman",
"fullName": "Evgeny Roytman",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Craig",
"surname": "Gotsman",
"fullName": "Craig Gotsman",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "1995-07-01 00:00:00",
"pubType": "trans",
"pages": "274-286",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iciap/2001/1183/0/11830596",
"title": "Contextual Color Quantization Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iciap/2001/11830596/12OmNAXxWUB",
"parentPublication": {
"id": "proceedings/iciap/2001/1183/0",
"title": "Proceedings ICIAP 2001. 11th International Conference on Image Analysis and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2004/2128/1/212810656",
"title": "Fast Color Image Quantization using Squared Euclidean Distance of Adjacent Color Points along the Highest Color Variance Axis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2004/212810656/12OmNrIJqqR",
"parentPublication": {
"id": "proceedings/icpr/2004/2128/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2920/0/00201921",
"title": "A color video image quantization method with stable and efficient color selection capability",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00201921/12OmNvF83ne",
"parentPublication": {
"id": "proceedings/icpr/1992/2920/0",
"title": "11th IAPR International Conference on Pattern Recognition. Vol. III. Conference C: Image, Speech and Signal Analysis,",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/2/3336c943",
"title": "An Improved Median-Cut Algorithm of Color Image Quantization",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336c943/12OmNzWfoVh",
"parentPublication": {
"id": "proceedings/csse/2008/3336/6",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/08/07305807",
"title": "A Survey of Colormaps in Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2016/08/07305807/13rRUwInvBb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08454346",
"title": "Mapping Color to Meaning in Colormap Data Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08454346/17D45VsBU7I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933760",
"title": "Evaluating Gradient Perception in Color-Coded Scalar Fields",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933760/1fTgHHw1pSM",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/08939459",
"title": "The Making of Continuous Colormaps",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/08939459/1fZRynxLXGM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2020/05/09167329",
"title": "The Importance of Colormaps",
"doi": null,
"abstractUrl": "/magazine/cs/2020/05/09167329/1mhPJUptqpy",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09395231",
"title": "Deep Colormap Extraction From Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09395231/1syq9xMMQY8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "v0255",
"articleId": "13rRUILLkDE",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa5rV",
"doi": "10.1109/TVCG.2013.64",
"abstract": "The papers in this special section includes extended versions of four of the best papers presented at the 2012 ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games held from 9-11 March 2012 in Costa Mesa, California.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The papers in this special section includes extended versions of four of the best papers presented at the 2012 ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games held from 9-11 March 2012 in Costa Mesa, California.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The papers in this special section includes extended versions of four of the best papers presented at the 2012 ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games held from 9-11 March 2012 in Costa Mesa, California.",
"title": "Guest Editors' Introduction: Special Section on the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games (I3D 2012)",
"normalizedTitle": "Guest Editors' Introduction: Special Section on the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games (I3D 2012)",
"fno": "ttg2013050721",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Computer Graphics",
"Three Dimensional Displays",
"Interactive Systems"
],
"authors": [
{
"givenName": "Michael",
"surname": "Garland",
"fullName": "Michael Garland",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rui",
"surname": "Wang",
"fullName": "Rui Wang",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "721-722",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tb/2013/05/ttb2013051089",
"title": "Guest Editorial for ACM BCB",
"doi": null,
"abstractUrl": "/journal/tb/2013/05/ttb2013051089/13rRUEgs2Af",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2013/04/ttb2013040817",
"title": "Guest Editorial for Special Section on BSB 2012",
"doi": null,
"abstractUrl": "/journal/tb/2013/04/ttb2013040817/13rRUIIVlaV",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/03/07835799",
"title": "Guest Editor's Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)",
"doi": null,
"abstractUrl": "/journal/tg/2017/03/07835799/13rRUIIVlkm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/06/ttg2013060898",
"title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium 2012",
"doi": null,
"abstractUrl": "/journal/tg/2013/06/ttg2013060898/13rRUNvgziD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/10/07557101",
"title": "Guest Editor's Introduction to the Special Section on the ACM Symposium on Interactive 3D Graphics and Games (I3D)",
"doi": null,
"abstractUrl": "/journal/tg/2016/10/07557101/13rRUwbaqUQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010002",
"title": "Guest Editor's Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010002/13rRUwfZC0h",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2013/08/ttm2013081470",
"title": "Guest Editorial: Special Section on Outstanding Papers from MobiSys 2012",
"doi": null,
"abstractUrl": "/journal/tm/2013/08/ttm2013081470/13rRUxAATh6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/10/07230338",
"title": "Guest Editor's Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)",
"doi": null,
"abstractUrl": "/journal/tg/2015/10/07230338/13rRUxOve9L",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2014/01/ttm2014010003",
"title": "Guest Editorial: Special section on outstanding papers from MobiCom 2012",
"doi": null,
"abstractUrl": "/journal/tm/2014/01/ttm2014010003/13rRUxZ0o27",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/10/06881790",
"title": "Guest Editors' Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)",
"doi": null,
"abstractUrl": "/journal/tg/2014/10/06881790/13rRUy0HYRq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "ttg2013050723",
"articleId": "13rRUxBa5bY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa5bY",
"doi": "10.1109/TVCG.2012.295",
"abstract": "Existing natural media painting simulations have produced high-quality results, but have required powerful compute hardware and have been limited to screen resolutions. Digital artists would like to be able to use watercolor-like painting tools, but at print resolutions and on lower end hardware such as laptops or even slates. We present a procedural algorithm for generating watercolor-like dynamic paint behaviors in a lightweight manner. Our goal is not to exactly duplicate watercolor painting, but to create a range of dynamic behaviors that allow users to achieve a similar style of process and result, while at the same time having a unique character of its own. Our stroke representation is vector based, allowing for rendering at arbitrary resolutions, and our procedural pigment advection algorithm is fast enough to support painting on slate devices. We demonstrate our technique in a commercially available slate application used by professional artists. Finally, we present a detailed analysis of the different vector-rendering technologies available.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Existing natural media painting simulations have produced high-quality results, but have required powerful compute hardware and have been limited to screen resolutions. Digital artists would like to be able to use watercolor-like painting tools, but at print resolutions and on lower end hardware such as laptops or even slates. We present a procedural algorithm for generating watercolor-like dynamic paint behaviors in a lightweight manner. Our goal is not to exactly duplicate watercolor painting, but to create a range of dynamic behaviors that allow users to achieve a similar style of process and result, while at the same time having a unique character of its own. Our stroke representation is vector based, allowing for rendering at arbitrary resolutions, and our procedural pigment advection algorithm is fast enough to support painting on slate devices. We demonstrate our technique in a commercially available slate application used by professional artists. Finally, we present a detailed analysis of the different vector-rendering technologies available.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Existing natural media painting simulations have produced high-quality results, but have required powerful compute hardware and have been limited to screen resolutions. Digital artists would like to be able to use watercolor-like painting tools, but at print resolutions and on lower end hardware such as laptops or even slates. We present a procedural algorithm for generating watercolor-like dynamic paint behaviors in a lightweight manner. Our goal is not to exactly duplicate watercolor painting, but to create a range of dynamic behaviors that allow users to achieve a similar style of process and result, while at the same time having a unique character of its own. Our stroke representation is vector based, allowing for rendering at arbitrary resolutions, and our procedural pigment advection algorithm is fast enough to support painting on slate devices. We demonstrate our technique in a commercially available slate application used by professional artists. Finally, we present a detailed analysis of the different vector-rendering technologies available.",
"title": "Painting with Polygons: A Procedural Watercolor Engine",
"normalizedTitle": "Painting with Polygons: A Procedural Watercolor Engine",
"fno": "ttg2013050723",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Painting",
"Paints",
"Vectors",
"Pigments",
"Brushes",
"Heuristic Algorithms",
"Tablet Computers",
"Real Time",
"Natural Media",
"Watercolor Painting",
"Vector Graphics"
],
"authors": [
{
"givenName": "S.",
"surname": "DiVerdi",
"fullName": "S. DiVerdi",
"affiliation": "Adobe Syst., Inc., Oakland, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Krishnaswamy",
"fullName": "A. Krishnaswamy",
"affiliation": "Adobe Syst., Inc., Google, Inc., San Jose, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "R.",
"surname": "Mech",
"fullName": "R. Mech",
"affiliation": "Adobe Syst., Inc., Oakland, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "D.",
"surname": "Ito",
"fullName": "D. Ito",
"affiliation": "Adobe Syst., Inc., Oakland, CA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "723-735",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icis/2017/5507/0/07960025",
"title": "A flexible finger-mounted airbrush model for immersive freehand painting",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2017/07960025/12OmNBV9Ikp",
"parentPublication": {
"id": "proceedings/icis/2017/5507/0",
"title": "2017 IEEE/ACIS 16th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2009/4830/0/05255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2009/05255155/12OmNyFU7aU",
"parentPublication": {
"id": "proceedings/grc/2009/4830/0",
"title": "2009 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2017/0831/0/0831a158",
"title": "Real-Time Watercolor Simulation with Fluid Vorticity Within Brush Stroke",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a158/12OmNyxXlwN",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07042343",
"title": "A Modular Framework for Digital Painting",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07042343/13rRUxDIthe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/07/06766598",
"title": "WYSIWYG Stereo Paintingwith Usability Enhancements",
"doi": null,
"abstractUrl": "/journal/tg/2014/07/06766598/13rRUyeCkah",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08765801",
"title": "Vectorized Painting with Temporal Diffusion Curves",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08765801/1bLypqX0rwA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798200",
"title": "Panoramic Fluid Painting",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798200/1cJ0VsoPxfO",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2019/5268/0/526800a465",
"title": "A Watercolor Painting Image Generation Using Stroke-Based Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2019/526800a465/1gysHZ4vu8M",
"parentPublication": {
"id": "proceedings/candarw/2019/5268/0",
"title": "2019 Seventh International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i432",
"title": "Painting Many Pasts: Synthesizing Time Lapse Videos of Paintings",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i432/1m3nu7jSK6Q",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2021/2835/0/283500a145",
"title": "A GPU Implementation of Watercolor Painting Image Generation",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2021/283500a145/1zw5NurwD72",
"parentPublication": {
"id": "proceedings/candarw/2021/2835/0",
"title": "2021 Ninth International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050721",
"articleId": "13rRUxBa5rV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050736",
"articleId": "13rRUxASuSM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASuSM",
"doi": "10.1109/TVCG.2012.307",
"abstract": "We propose the first graphics processing unit (GPU) solution to compute the 2D constrained Delaunay triangulation (CDT) of a planar straight line graph (PSLG) consisting of points and edges. There are many existing CPU algorithms to solve the CDT problem in computational geometry, yet there has been no prior approach to solve this problem efficiently using the parallel computing power of the GPU. For the special case of the CDT problem where the PSLG consists of just points, which is simply the normal Delaunay triangulation (DT) problem, a hybrid approach using the GPU together with the CPU to partially speed up the computation has already been presented in the literature. Our work, on the other hand, accelerates the entire computation on the GPU. Our implementation using the CUDA programming model on NVIDIA GPUs is numerically robust, and runs up to an order of magnitude faster than the best sequential implementations on the CPU. This result is reflected in our experiment with both randomly generated PSLGs and real-world GIS data having millions of points and edges.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose the first graphics processing unit (GPU) solution to compute the 2D constrained Delaunay triangulation (CDT) of a planar straight line graph (PSLG) consisting of points and edges. There are many existing CPU algorithms to solve the CDT problem in computational geometry, yet there has been no prior approach to solve this problem efficiently using the parallel computing power of the GPU. For the special case of the CDT problem where the PSLG consists of just points, which is simply the normal Delaunay triangulation (DT) problem, a hybrid approach using the GPU together with the CPU to partially speed up the computation has already been presented in the literature. Our work, on the other hand, accelerates the entire computation on the GPU. Our implementation using the CUDA programming model on NVIDIA GPUs is numerically robust, and runs up to an order of magnitude faster than the best sequential implementations on the CPU. This result is reflected in our experiment with both randomly generated PSLGs and real-world GIS data having millions of points and edges.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose the first graphics processing unit (GPU) solution to compute the 2D constrained Delaunay triangulation (CDT) of a planar straight line graph (PSLG) consisting of points and edges. There are many existing CPU algorithms to solve the CDT problem in computational geometry, yet there has been no prior approach to solve this problem efficiently using the parallel computing power of the GPU. For the special case of the CDT problem where the PSLG consists of just points, which is simply the normal Delaunay triangulation (DT) problem, a hybrid approach using the GPU together with the CPU to partially speed up the computation has already been presented in the literature. Our work, on the other hand, accelerates the entire computation on the GPU. Our implementation using the CUDA programming model on NVIDIA GPUs is numerically robust, and runs up to an order of magnitude faster than the best sequential implementations on the CPU. This result is reflected in our experiment with both randomly generated PSLGs and real-world GIS data having millions of points and edges.",
"title": "Computing 2D Constrained Delaunay Triangulation Using the GPU",
"normalizedTitle": "Computing 2D Constrained Delaunay Triangulation Using the GPU",
"fno": "ttg2013050736",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Graphics Processing Units",
"Instruction Sets",
"Arrays",
"Strips",
"Standards",
"Color",
"Image Vectorization",
"GPGPU",
"Parallel Computation",
"Computational Geometry",
"Voronoi Diagram"
],
"authors": [
{
"givenName": null,
"surname": "Meng Qi",
"fullName": "Meng Qi",
"affiliation": "GR/ST/21-256C, Nat. Univ. of Singapore, Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Thanh-Tung Cao",
"fullName": "Thanh-Tung Cao",
"affiliation": "Nat. Univ. of Singapore, Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Tiow-Seng Tan",
"fullName": "Tiow-Seng Tan",
"affiliation": "Sch. of Comput., Nat. Univ. of Singapore, Singapore, Singapore",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "736-748",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/isvd/2006/2630/0/26300025",
"title": "On the Stretch Factor of the Constrained Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2006/26300025/12OmNAIvcYb",
"parentPublication": {
"id": "proceedings/isvd/2006/2630/0",
"title": "2006 3rd International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2011/4353/2/05750952",
"title": "Image Completion Using Constrained Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2011/05750952/12OmNBNM8Sj",
"parentPublication": {
"id": "icicta/2011/4353/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apdc/1997/7876/0/78760131",
"title": "An Improved Parallel Algorithm for Delaunay Triangulation on Distributed Memory Parallel Computers",
"doi": null,
"abstractUrl": "/proceedings-article/apdc/1997/78760131/12OmNBTawsE",
"parentPublication": {
"id": "proceedings/apdc/1997/7876/0",
"title": "Advances in Parallel and Distributed Computing Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/1/3571a248",
"title": "An Intelligent Method of Detecting Multi-factors Neighborhood Relation Based On Constrained Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571a248/12OmNvDZF7c",
"parentPublication": {
"id": "gcis/2009/3571/1",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccrd/2010/4043/0/4043a326",
"title": "Delaunay Triangulation Based Three Dimensional Anatomical Facial Reconstruction from 2D CT Slices",
"doi": null,
"abstractUrl": "/proceedings-article/iccrd/2010/4043a326/12OmNvTk00T",
"parentPublication": {
"id": "proceedings/iccrd/2010/4043/0",
"title": "Computer Research and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa/2010/4190/0/4190a224",
"title": "The Merge Phase of Parallel Divide-and-Conquer Scheme for 3D Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/ispa/2010/4190a224/12OmNwDj12p",
"parentPublication": {
"id": "proceedings/ispa/2010/4190/0",
"title": "International Symposium on Parallel and Distributed Processing with Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2012/1910/0/06257653",
"title": "Localizing the Delaunay Triangulation and its Parallel Implementation",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2012/06257653/12OmNwoxSe8",
"parentPublication": {
"id": "proceedings/isvd/2012/1910/0",
"title": "2012 Ninth International Symposium on Voronoi Diagrams in Science and Engineering (ISVD 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1988/0878/0/00028293",
"title": "Constrained Delaunay triangulation for multiresolution surface description",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1988/00028293/12OmNxR5USd",
"parentPublication": {
"id": "proceedings/icpr/1988/0878/0",
"title": "9th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icetet/2008/3267/0/3267a282",
"title": "Local Delaunay Triangulation for Mobile Nodes",
"doi": null,
"abstractUrl": "/proceedings-article/icetet/2008/3267a282/12OmNxwWoUv",
"parentPublication": {
"id": "proceedings/icetet/2008/3267/0",
"title": "Emerging Trends in Engineering & Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pact/2019/3613/0/361300a409",
"title": "Computing Three-Dimensional Constrained Delaunay Refinement Using the GPU",
"doi": null,
"abstractUrl": "/proceedings-article/pact/2019/361300a409/1eLy400Tb2g",
"parentPublication": {
"id": "proceedings/pact/2019/3613/0",
"title": "2019 28th International Conference on Parallel Architectures and Compilation Techniques (PACT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050723",
"articleId": "13rRUxBa5bY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050749",
"articleId": "13rRUyp7tWW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyp7tWW",
"doi": "10.1109/TVCG.2012.298",
"abstract": "Shading acquired materials with high-frequency illumination is computationally expensive. Estimating the shading integral requires multiple samples of the incident illumination. The number of samples required may vary across the image, and the image itself may have high- and low-frequency variations, depending on a combination of several factors. Adaptively distributing computational budget across the pixels for shading is a challenging problem. In this paper, we depict complex materials such as acquired reflectances, interactively, without any precomputation based on geometry. In each frame, we first estimate the frequencies in the local light field arriving at each pixel, as well as the variance of the shading integrand. Our frequency analysis accounts for combinations of a variety of factors: the reflectance of the object projecting to the pixel, the nature of the illumination, the local geometry and the camera position relative to the geometry and lighting. We then exploit this frequency information (bandwidth and variance) to adaptively sample for reconstruction and integration. For example, fewer pixels per unit area are shaded for pixels projecting onto diffuse objects, and fewer samples are used for integrating illumination incident on specular objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Shading acquired materials with high-frequency illumination is computationally expensive. Estimating the shading integral requires multiple samples of the incident illumination. The number of samples required may vary across the image, and the image itself may have high- and low-frequency variations, depending on a combination of several factors. Adaptively distributing computational budget across the pixels for shading is a challenging problem. In this paper, we depict complex materials such as acquired reflectances, interactively, without any precomputation based on geometry. In each frame, we first estimate the frequencies in the local light field arriving at each pixel, as well as the variance of the shading integrand. Our frequency analysis accounts for combinations of a variety of factors: the reflectance of the object projecting to the pixel, the nature of the illumination, the local geometry and the camera position relative to the geometry and lighting. We then exploit this frequency information (bandwidth and variance) to adaptively sample for reconstruction and integration. For example, fewer pixels per unit area are shaded for pixels projecting onto diffuse objects, and fewer samples are used for integrating illumination incident on specular objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Shading acquired materials with high-frequency illumination is computationally expensive. Estimating the shading integral requires multiple samples of the incident illumination. The number of samples required may vary across the image, and the image itself may have high- and low-frequency variations, depending on a combination of several factors. Adaptively distributing computational budget across the pixels for shading is a challenging problem. In this paper, we depict complex materials such as acquired reflectances, interactively, without any precomputation based on geometry. In each frame, we first estimate the frequencies in the local light field arriving at each pixel, as well as the variance of the shading integrand. Our frequency analysis accounts for combinations of a variety of factors: the reflectance of the object projecting to the pixel, the nature of the illumination, the local geometry and the camera position relative to the geometry and lighting. We then exploit this frequency information (bandwidth and variance) to adaptively sample for reconstruction and integration. For example, fewer pixels per unit area are shaded for pixels projecting onto diffuse objects, and fewer samples are used for integrating illumination incident on specular objects.",
"title": "Interactive Rendering of Acquired Materials on Dynamic Geometry Using Frequency Analysis",
"normalizedTitle": "Interactive Rendering of Acquired Materials on Dynamic Geometry Using Frequency Analysis",
"fno": "ttg2013050749",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Bandwidth",
"Lighting",
"Materials",
"Light Sources",
"Geometry",
"Rendering Computer Graphics",
"Convolution",
"Fourier Analysis",
"Computer Graphics",
"Rendering",
"Illumination Simulation",
"Measured Reflectance"
],
"authors": [
{
"givenName": "M. M.",
"surname": "Bagher",
"fullName": "M. M. Bagher",
"affiliation": "Dept. Inf. et de Rech. Operationelle, Univ. of Montreal, Montreal, QC, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "C.",
"surname": "Soler",
"fullName": "C. Soler",
"affiliation": "INRIA Rhone-Alpes, Montbonnot, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "K.",
"surname": "Subr",
"fullName": "K. Subr",
"affiliation": "Univ. Coll. London, London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "L.",
"surname": "Belcour",
"fullName": "L. Belcour",
"affiliation": "Inria Bordeaux - Sud-Ouest, Talence, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "N.",
"surname": "Holzschuch",
"fullName": "N. Holzschuch",
"affiliation": "INRIA Rhone-Alpes, Montbonnot, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "749-761",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2011/0063/0/06130334",
"title": "Illumination estimation from shadow borders",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130334/12OmNs4S8L5",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a056",
"title": "Voxel-Based Interactive Rendering of Translucent Materials under Area Lights Using Sparse Samples",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a056/12OmNvDqsQf",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118d922",
"title": "Exploiting Shading Cues in Kinect IR Images for Geometry Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118d922/12OmNvIxeXq",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206614",
"title": "A projector-camera setup for geometry-invariant frequency demultiplexing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206614/12OmNvoWV1H",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a913",
"title": "Cartoon Rendering Illumination Model Based on Phong",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a913/12OmNwoPtun",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/searis/2014/9955/0/07152799",
"title": "guacamole - An extensible scene graph and rendering framework based on deferred shading",
"doi": null,
"abstractUrl": "/proceedings-article/searis/2014/07152799/12OmNzA6GLj",
"parentPublication": {
"id": "proceedings/searis/2014/9955/0",
"title": "2014 IEEE 7th Workshop on Software Engineering and Architectures for Realtime Interactive Systems (SEARIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2013/6463/0/06528300",
"title": "Descattering of transmissive observation using Parallel High-Frequency Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2013/06528300/12OmNzmclka",
"parentPublication": {
"id": "proceedings/iccp/2013/6463/0",
"title": "2013 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122946",
"title": "Lighting Design for Globally Illuminated Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122946/13rRUwvBy8U",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/02/ttp2013020437",
"title": "Simultaneous Cast Shadows, Illumination and Geometry Inference Using Hypergraphs",
"doi": null,
"abstractUrl": "/journal/tp/2013/02/ttp2013020437/13rRUyfKIEo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09904431",
"title": "Neural Global Illumination: Interactive Indirect Illumination Prediction under Dynamic Area Lights",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09904431/1H0GdxnVnws",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050736",
"articleId": "13rRUxASuSM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050762",
"articleId": "13rRUxOve9H",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxOve9H",
"doi": "10.1109/TVCG.2012.314",
"abstract": "A 4D parametric motion graph representation is presented for interactive animation from actor performance capture in a multiple camera studio. The representation is based on a 4D model database of temporally aligned mesh sequence reconstructions for multiple motions. High-level movement controls such as speed and direction are achieved by blending multiple mesh sequences of related motions. A real-time mesh sequence blending approach is introduced, which combines the realistic deformation of previous nonlinear solutions with efficient online computation. Transitions between different parametric motion spaces are evaluated in real time based on surface shape and motion similarity. Four-dimensional parametric motion graphs allow real-time interactive character animation while preserving the natural dynamics of the captured performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A 4D parametric motion graph representation is presented for interactive animation from actor performance capture in a multiple camera studio. The representation is based on a 4D model database of temporally aligned mesh sequence reconstructions for multiple motions. High-level movement controls such as speed and direction are achieved by blending multiple mesh sequences of related motions. A real-time mesh sequence blending approach is introduced, which combines the realistic deformation of previous nonlinear solutions with efficient online computation. Transitions between different parametric motion spaces are evaluated in real time based on surface shape and motion similarity. Four-dimensional parametric motion graphs allow real-time interactive character animation while preserving the natural dynamics of the captured performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A 4D parametric motion graph representation is presented for interactive animation from actor performance capture in a multiple camera studio. The representation is based on a 4D model database of temporally aligned mesh sequence reconstructions for multiple motions. High-level movement controls such as speed and direction are achieved by blending multiple mesh sequences of related motions. A real-time mesh sequence blending approach is introduced, which combines the realistic deformation of previous nonlinear solutions with efficient online computation. Transitions between different parametric motion spaces are evaluated in real time based on surface shape and motion similarity. Four-dimensional parametric motion graphs allow real-time interactive character animation while preserving the natural dynamics of the captured performance.",
"title": "Interactive Animation of 4D Performance Capture",
"normalizedTitle": "Interactive Animation of 4D Performance Capture",
"fno": "ttg2013050762",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Animation",
"Real Time Systems",
"Databases",
"Aerospace Electronics",
"Interpolation",
"Mesh Generation",
"Shape",
"4 D Performance Capture",
"Character Animation",
"3 D Video",
"Real Time Animation",
"Multiview Reconstruction",
"Video Based Animation",
"4 D Modeling"
],
"authors": [
{
"givenName": "Dan",
"surname": "Casas",
"fullName": "Dan Casas",
"affiliation": "Centre for Vision Speech & Signal Process., Univ. of Surrey, Guildford, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Tejera",
"fullName": "M. Tejera",
"affiliation": "Centre for Vision Speech & Signal Process., Univ. of Surrey, Guildford, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J.",
"surname": "Guillemaut",
"fullName": "J. Guillemaut",
"affiliation": "Centre for Vision Speech & Signal Process., Univ. of Surrey, Guildford, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Hilton",
"fullName": "A. Hilton",
"affiliation": "Centre for Vision Speech & Signal Process., Univ. of Surrey, Guildford, UK",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "762-773",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccis/2012/4789/0/4789a530",
"title": "Data-Driven Based Interactive Motion Blending",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2012/4789a530/12OmNAY79d9",
"parentPublication": {
"id": "proceedings/iccis/2012/4789/0",
"title": "2012 Fourth International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ds-rt/2011/1643/0/06051804",
"title": "4D Performance Modelling and Animation",
"doi": null,
"abstractUrl": "/proceedings-article/ds-rt/2011/06051804/12OmNvA1heq",
"parentPublication": {
"id": "proceedings/ds-rt/2011/1643/0",
"title": "2011 IEEE/ACM 15th International Symposium on Distributed Simulation and Real Time Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a478",
"title": "Video Based Animation Synthesis with the Essential Graph",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a478/12OmNwFid1n",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2006/2754/0/27540626",
"title": "A Motion Blending Approach Based on Unsupervised Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2006/27540626/12OmNzzfTlW",
"parentPublication": {
"id": "proceedings/icat/2006/2754/0",
"title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2007/03/mcg2007030021",
"title": "Surface Capture for Performance-Based Animation",
"doi": null,
"abstractUrl": "/magazine/cg/2007/03/mcg2007030021/13rRUIJcWnq",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08352750",
"title": "Surface Motion Capture Animation Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08352750/13rRUwjXZSl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061587",
"title": "GL4D: A GPU-based Architecture for Interactive 4D Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061587/13rRUwjoNwY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/03/mcg2018030131",
"title": "4D Cubism: Modeling, Animation, and Fabrication of Artistic Shapes",
"doi": null,
"abstractUrl": "/magazine/cg/2018/03/mcg2018030131/13rRUy3gmXD",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1323",
"title": "Learning Motion Priors for 4D Human Body Capture in 3D Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1323/1BmLs4NuZAQ",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e725",
"title": "A Spatiotemporal Volumetric Interpolation Network for 4D Dynamic Medical Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e725/1m3o6jEDHEY",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050749",
"articleId": "13rRUyp7tWW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050774",
"articleId": "13rRUygT7sE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRQh",
"name": "ttg2013050762s.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013050762s.zip",
"extension": "zip",
"size": "23.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygT7sE",
"doi": "10.1109/TVCG.2012.149",
"abstract": "We propose feature-based motion graphs for realistic locomotion synthesis among obstacles. Among several advantages, feature-based motion graphs achieve improved results in search queries, eliminate the need of postprocessing for foot skating removal, and reduce the computational requirements in comparison to traditional motion graphs. Our contributions are threefold. First, we show that choosing transitions based on relevant features significantly reduces graph construction time and leads to improved search performances. Second, we employ a fast channel search method that confines the motion graph search to a free channel with guaranteed clearance among obstacles, achieving faster and improved results that avoid expensive collision checking. Lastly, we present a motion deformation model based on Inverse Kinematics applied over the transitions of a solution branch. Each transition is assigned a continuous deformation range that does not exceed the original transition cost threshold specified by the user for the graph construction. The obtained deformation improves the reachability of the feature-based motion graph and in turn also reduces the time spent during search. The results obtained by the proposed methods are evaluated and quantified, and they demonstrate significant improvements in comparison to traditional motion graph techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose feature-based motion graphs for realistic locomotion synthesis among obstacles. Among several advantages, feature-based motion graphs achieve improved results in search queries, eliminate the need of postprocessing for foot skating removal, and reduce the computational requirements in comparison to traditional motion graphs. Our contributions are threefold. First, we show that choosing transitions based on relevant features significantly reduces graph construction time and leads to improved search performances. Second, we employ a fast channel search method that confines the motion graph search to a free channel with guaranteed clearance among obstacles, achieving faster and improved results that avoid expensive collision checking. Lastly, we present a motion deformation model based on Inverse Kinematics applied over the transitions of a solution branch. Each transition is assigned a continuous deformation range that does not exceed the original transition cost threshold specified by the user for the graph construction. The obtained deformation improves the reachability of the feature-based motion graph and in turn also reduces the time spent during search. The results obtained by the proposed methods are evaluated and quantified, and they demonstrate significant improvements in comparison to traditional motion graph techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose feature-based motion graphs for realistic locomotion synthesis among obstacles. Among several advantages, feature-based motion graphs achieve improved results in search queries, eliminate the need of postprocessing for foot skating removal, and reduce the computational requirements in comparison to traditional motion graphs. Our contributions are threefold. First, we show that choosing transitions based on relevant features significantly reduces graph construction time and leads to improved search performances. Second, we employ a fast channel search method that confines the motion graph search to a free channel with guaranteed clearance among obstacles, achieving faster and improved results that avoid expensive collision checking. Lastly, we present a motion deformation model based on Inverse Kinematics applied over the transitions of a solution branch. Each transition is assigned a continuous deformation range that does not exceed the original transition cost threshold specified by the user for the graph construction. The obtained deformation improves the reachability of the feature-based motion graph and in turn also reduces the time spent during search. The results obtained by the proposed methods are evaluated and quantified, and they demonstrate significant improvements in comparison to traditional motion graph techniques.",
"title": "Analyzing Locomotion Synthesis with Feature-Based Motion Graphs",
"normalizedTitle": "Analyzing Locomotion Synthesis with Feature-Based Motion Graphs",
"fno": "ttg2013050774",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Motion Segmentation",
"Feature Extraction",
"Image Segmentation",
"Detectors",
"Joints",
"Databases",
"Foot",
"Human Like Motion Planning",
"Computer Animation",
"Locomotion",
"Motion Capture"
],
"authors": [
{
"givenName": "M.",
"surname": "Mahmudi",
"fullName": "M. Mahmudi",
"affiliation": "Sch. of Eng., Univ. of California, Merced, Merced, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Kallmann",
"fullName": "M. Kallmann",
"affiliation": "Sch. of Eng., Univ. of California, Merced, Merced, CA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "774-786",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2011/348/0/06011912",
"title": "Motion synthesis for synchronizing with streaming music by segment-based search on metadata motion graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011912/12OmNAoUTkt",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/1/01315121",
"title": "Modeling complex motion by tracking and editing hidden Markov graphs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315121/12OmNBInLkR",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/1",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2011/4501/0/4501a955",
"title": "GPU-based Motion Blending for Motion Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2011/4501a955/12OmNBcShTc",
"parentPublication": {
"id": "proceedings/iccis/2011/4501/0",
"title": "2011 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2012/1120/0/S2002",
"title": "Real-time motion editing for reaching tasks using multiple internal graphs",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2012/S2002/12OmNvH7fjV",
"parentPublication": {
"id": "proceedings/cgames/2012/1120/0",
"title": "2012 17th International Conference on Computer Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2013/2246/0/2246a330",
"title": "Multi-Touch Interface for Character Motion Control Using Model-Based Approach",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2013/2246a330/12OmNvq5jDZ",
"parentPublication": {
"id": "proceedings/cw/2013/2246/0",
"title": "2013 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a478",
"title": "Video Based Animation Synthesis with the Essential Graph",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a478/12OmNwFid1n",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/psivt/2010/4285/0/4285a487",
"title": "Sophisticated Construction and Search of 2D Motion Graphs for Synthesizing Videos",
"doi": null,
"abstractUrl": "/proceedings-article/psivt/2010/4285a487/12OmNx5Yvb6",
"parentPublication": {
"id": "proceedings/psivt/2010/4285/0",
"title": "Image and Video Technology, Pacific-Rim Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607506",
"title": "Motion synthesis for affective agents using piecewise principal component regression",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607506/12OmNyQYt2c",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08352750",
"title": "Surface Motion Capture Animation Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08352750/13rRUwjXZSl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050762",
"articleId": "13rRUxOve9H",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050787",
"articleId": "13rRUxZzAhF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesN2",
"name": "ttg2013050774s.m4v",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013050774s.m4v",
"extension": "m4v",
"size": "24.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxZzAhF",
"doi": "10.1109/TVCG.2012.173",
"abstract": "We propose a biharmonic model for cross-object volumetric mapping. This new computational model aims to facilitate the mapping of solid models with complicated geometry or heterogeneous inner structures. In order to solve cross-shape mapping between such models through divide and conquer, solid models can be decomposed into subparts upon which mappings is computed individually. The biharmonic volumetric mapping can be performed in each subregion separately. Unlike the widely used harmonic mapping which only allows C0 continuity along the segmentation boundary interfaces, this biharmonic model can provide C1 smoothness. We demonstrate the efficacy of our mapping framework on various geometric models with complex geometry (which are decomposed into subparts with simpler and solvable geometry) or heterogeneous interior structures (whose different material layers can be segmented and processed separately).",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a biharmonic model for cross-object volumetric mapping. This new computational model aims to facilitate the mapping of solid models with complicated geometry or heterogeneous inner structures. In order to solve cross-shape mapping between such models through divide and conquer, solid models can be decomposed into subparts upon which mappings is computed individually. The biharmonic volumetric mapping can be performed in each subregion separately. Unlike the widely used harmonic mapping which only allows C0 continuity along the segmentation boundary interfaces, this biharmonic model can provide C1 smoothness. We demonstrate the efficacy of our mapping framework on various geometric models with complex geometry (which are decomposed into subparts with simpler and solvable geometry) or heterogeneous interior structures (whose different material layers can be segmented and processed separately).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a biharmonic model for cross-object volumetric mapping. This new computational model aims to facilitate the mapping of solid models with complicated geometry or heterogeneous inner structures. In order to solve cross-shape mapping between such models through divide and conquer, solid models can be decomposed into subparts upon which mappings is computed individually. The biharmonic volumetric mapping can be performed in each subregion separately. Unlike the widely used harmonic mapping which only allows C0 continuity along the segmentation boundary interfaces, this biharmonic model can provide C1 smoothness. We demonstrate the efficacy of our mapping framework on various geometric models with complex geometry (which are decomposed into subparts with simpler and solvable geometry) or heterogeneous interior structures (whose different material layers can be segmented and processed separately).",
"title": "Biharmonic Volumetric Mapping Using Fundamental Solutions",
"normalizedTitle": "Biharmonic Volumetric Mapping Using Fundamental Solutions",
"fno": "ttg2013050787",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computational Modeling",
"Harmonic Analysis",
"Mathematical Model",
"Equations",
"Geometry",
"Shape",
"Boundary Conditions",
"Biharmonic Mapping",
"Volumetric Mapping"
],
"authors": [
{
"givenName": null,
"surname": "Huanhuan Xu",
"fullName": "Huanhuan Xu",
"affiliation": "Sch. of Electr. Eng. & Comput. Sci., Louisiana State Univ., Baton Rouge, LA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Wuyi Yu",
"fullName": "Wuyi Yu",
"affiliation": "Sch. of Electr. Eng. & Comput. Sci., Louisiana State Univ., Baton Rouge, LA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Shiyuan Gu",
"fullName": "Shiyuan Gu",
"affiliation": "Dept. of Math., Louisiana State Univ., Baton Rouge, LA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Xin Li",
"fullName": "Xin Li",
"affiliation": "Sch. of Electr. Eng. & Comput. Sci., Louisiana State Univ., Baton Rouge, LA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "787-798",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2008/1971/0/04480772",
"title": "New Rendering Approach for Composable Volumetric Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480772/12OmNBAqZId",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/2002/7641/0/76410055",
"title": "Haptics-Based Volumetric Modeling Using Dynamic Spline-Based Implicit Functions",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2002/76410055/12OmNyqRn5T",
"parentPublication": {
"id": "proceedings/vv/2002/7641/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2000/02/v0181",
"title": "Conformal Surface Parameterization for Texture Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2000/02/v0181/13rRUxBJhFj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/01/v0055",
"title": "Modeling, Animating, and Rendering Complex Scenes Using Volumetric Textures",
"doi": null,
"abstractUrl": "/journal/tg/1998/01/v0055/13rRUxly95q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/02/06894217",
"title": "Space-Time Transfinite Interpolation of Volumetric Material Properties",
"doi": null,
"abstractUrl": "/journal/tg/2015/02/06894217/13rRUyoPSP7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1936",
"title": "Building-GAN: Graph-Conditioned Architectural Volumetric Design Generation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1936/1BmGNVHFEZi",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g145",
"title": "NeuralHOFusion: Neural Volumetric Rendering under Human-object Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g145/1H1itCwY51e",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a597",
"title": "Dynamic Projection Mapping with 3D Images Using Volumetric Display",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a597/1tnX0LxdiuI",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700b659",
"title": "Autonomous Tracking For Volumetric Video Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700b659/1uqGjzXRQ3e",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h115",
"title": "NeuTex: Neural Texture Mapping for Volumetric Neural Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h115/1yeLdyIKnV6",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050774",
"articleId": "13rRUygT7sE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050799",
"articleId": "13rRUB7a111",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgRi",
"name": "ttg2013050787s.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013050787s.pdf",
"extension": "pdf",
"size": "90.4 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUB7a111",
"doi": "10.1109/TVCG.2012.147",
"abstract": "We introduce an algorithm for construction of the Morse hierarchy, i.e., a hierarchy of Morse decompositions of a piecewise constant vector field on a surface driven by stability of the Morse sets with respect to perturbation of the vector field. Our approach builds upon earlier work on stable Morse decompositions, which can be used to obtain Morse sets of user-prescribed stability. More stable Morse decompositions are coarser, i.e., they consist of larger Morse sets. In this work, we develop an algorithm for tracking the growth of Morse sets and topological events (mergers) that they undergo as their stability is gradually increased. The resulting Morse hierarchy can be explored interactively. We provide examples demonstrating that it can provide a useful coarse overview of the vector field topology.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce an algorithm for construction of the Morse hierarchy, i.e., a hierarchy of Morse decompositions of a piecewise constant vector field on a surface driven by stability of the Morse sets with respect to perturbation of the vector field. Our approach builds upon earlier work on stable Morse decompositions, which can be used to obtain Morse sets of user-prescribed stability. More stable Morse decompositions are coarser, i.e., they consist of larger Morse sets. In this work, we develop an algorithm for tracking the growth of Morse sets and topological events (mergers) that they undergo as their stability is gradually increased. The resulting Morse hierarchy can be explored interactively. We provide examples demonstrating that it can provide a useful coarse overview of the vector field topology.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce an algorithm for construction of the Morse hierarchy, i.e., a hierarchy of Morse decompositions of a piecewise constant vector field on a surface driven by stability of the Morse sets with respect to perturbation of the vector field. Our approach builds upon earlier work on stable Morse decompositions, which can be used to obtain Morse sets of user-prescribed stability. More stable Morse decompositions are coarser, i.e., they consist of larger Morse sets. In this work, we develop an algorithm for tracking the growth of Morse sets and topological events (mergers) that they undergo as their stability is gradually increased. The resulting Morse hierarchy can be explored interactively. We provide examples demonstrating that it can provide a useful coarse overview of the vector field topology.",
"title": "Hierarchy of Stable Morse Decompositions",
"normalizedTitle": "Hierarchy of Stable Morse Decompositions",
"fno": "ttg2013050799",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Vectors",
"Trajectory",
"Numerical Stability",
"Stability Criteria",
"Topology",
"Indexes",
"Vector Field",
"Morse Decomposition",
"Persistence"
],
"authors": [
{
"givenName": "A.",
"surname": "Szymczak",
"fullName": "A. Szymczak",
"affiliation": "Dept. of Electr. Eng. & Comput. Sci., Colorado Sch. of Mines, Golden, CO, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "799-810",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wicsa/2008/3092/0/3092a029",
"title": "On the Quantitative Analysis of Architecture Stability in Aspectual Decompositions",
"doi": null,
"abstractUrl": "/proceedings-article/wicsa/2008/3092a029/12OmNAsTgWO",
"parentPublication": {
"id": "proceedings/wicsa/2008/3092/0",
"title": "Software Architecture, Working IEEE/IFIP Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a999",
"title": "Modeling and Generalization of Discrete Morse Terrain Decompositions",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a999/12OmNBTs7BJ",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2017/6231/0/6231a438",
"title": "An Evaluation of Psychological-Competitive Ability for Rugby Players Using the Analytic Hierarchy Process",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2017/6231a438/12OmNyyO8H8",
"parentPublication": {
"id": "proceedings/waina/2017/6231/0",
"title": "2017 31st International Conference on Advanced Information Networking and Applications: Workshops (WAINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/03/06873268",
"title": "Skeletonization and Partitioning of Digital Images Using Discrete Morse Theory",
"doi": null,
"abstractUrl": "/journal/tp/2015/03/06873268/13rRUx0xPod",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/nt/2013/01/06209453",
"title": "Scheduling in a Random Environment: Stability and Asymptotic Optimality",
"doi": null,
"abstractUrl": "/journal/nt/2013/01/06209453/13rRUxAASY4",
"parentPublication": {
"id": "trans/nt",
"title": "IEEE/ACM Transactions on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/05/ttg2012050767",
"title": "Morse Set Classification and Hierarchical Refinement Using Conley Index",
"doi": null,
"abstractUrl": "/journal/tg/2012/05/ttg2012050767/13rRUxASuAt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/10/ttg2012101757",
"title": "Parallel Computation of 2D Morse-Smale Complexes",
"doi": null,
"abstractUrl": "/journal/tg/2012/10/ttg2012101757/13rRUxASuSL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/04/ttg2008040848",
"title": "Efficient Morse Decompositions of Vector Fields",
"doi": null,
"abstractUrl": "/journal/tg/2008/04/ttg2008040848/13rRUxjQyvc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/06/ttg2012060938",
"title": "Robust Morse Decompositions of Piecewise Constant Vector Fields",
"doi": null,
"abstractUrl": "/journal/tg/2012/06/ttg2012060938/13rRUxlgxOj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904436",
"title": "SizePairs: Achieving Stable and Balanced Temporal Treemaps using Hierarchical Size-based Pairing",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904436/1H1gs021YFa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050787",
"articleId": "13rRUxZzAhF",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050811",
"articleId": "13rRUNvgziC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYet63",
"name": "ttg2013050799s.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013050799s.zip",
"extension": "zip",
"size": "14.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvgziC",
"doi": "10.1109/TVCG.2012.174",
"abstract": "In this paper, a physics-based framework is presented to visualize the human tongue deformation. The tongue is modeled with the Finite Element Method (FEM) and driven by the motion capture data gathered during speech production. Several novel deformation visualization techniques are presented for in-depth data analysis and exploration. To reveal the hidden semantic information of the tongue deformation, we present a novel physics-based volume segmentation algorithm. This is accomplished by decomposing the tongue model into segments based on its deformation pattern with the computation of deformation subspaces and fitting the target deformation locally at each segment. In addition, the strain energy is utilized to provide an intuitive low-dimensional visualization for the high-dimensional sequential motion. Energy-interpolation-based morphing is also equipped to effectively highlight the subtle differences of the 3D deformed shapes without any visual occlusion. Our experimental results and analysis demonstrate the effectiveness of this framework. The proposed methods, though originally designed for the exploration of the tongue deformation, are also valid for general deformation analysis of other shapes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, a physics-based framework is presented to visualize the human tongue deformation. The tongue is modeled with the Finite Element Method (FEM) and driven by the motion capture data gathered during speech production. Several novel deformation visualization techniques are presented for in-depth data analysis and exploration. To reveal the hidden semantic information of the tongue deformation, we present a novel physics-based volume segmentation algorithm. This is accomplished by decomposing the tongue model into segments based on its deformation pattern with the computation of deformation subspaces and fitting the target deformation locally at each segment. In addition, the strain energy is utilized to provide an intuitive low-dimensional visualization for the high-dimensional sequential motion. Energy-interpolation-based morphing is also equipped to effectively highlight the subtle differences of the 3D deformed shapes without any visual occlusion. Our experimental results and analysis demonstrate the effectiveness of this framework. The proposed methods, though originally designed for the exploration of the tongue deformation, are also valid for general deformation analysis of other shapes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, a physics-based framework is presented to visualize the human tongue deformation. The tongue is modeled with the Finite Element Method (FEM) and driven by the motion capture data gathered during speech production. Several novel deformation visualization techniques are presented for in-depth data analysis and exploration. To reveal the hidden semantic information of the tongue deformation, we present a novel physics-based volume segmentation algorithm. This is accomplished by decomposing the tongue model into segments based on its deformation pattern with the computation of deformation subspaces and fitting the target deformation locally at each segment. In addition, the strain energy is utilized to provide an intuitive low-dimensional visualization for the high-dimensional sequential motion. Energy-interpolation-based morphing is also equipped to effectively highlight the subtle differences of the 3D deformed shapes without any visual occlusion. Our experimental results and analysis demonstrate the effectiveness of this framework. The proposed methods, though originally designed for the exploration of the tongue deformation, are also valid for general deformation analysis of other shapes.",
"title": "Physics-Based Deformable Tongue Visualization",
"normalizedTitle": "Physics-Based Deformable Tongue Visualization",
"fno": "ttg2013050811",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Tongue",
"Sensors",
"Speech",
"Production",
"Shape",
"Deformable Models",
"Visualization",
"Modal Analysis",
"Deformable Model",
"Tongue",
"Finite Element Method"
],
"authors": [
{
"givenName": null,
"surname": "Yin Yang",
"fullName": "Yin Yang",
"affiliation": "Dept. of Comput. Sci., Univ. of Texas at Dallas, Richardson, TX, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Xiaohu Guo",
"fullName": "Xiaohu Guo",
"affiliation": "Dept. of Comput. Sci., Univ. of Texas at Dallas, Richardson, TX, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J.",
"surname": "Vick",
"fullName": "J. Vick",
"affiliation": "Dept. of Psychological Sci., Case Western Reserve Univ., Cleveland, OH, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "L. G.",
"surname": "Torres",
"fullName": "L. G. Torres",
"affiliation": "Dept. of Comput. Sci., Univ. of North Carolina at Chapel Hill, Chapel Hill, NC, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "T. F.",
"surname": "Campbell",
"fullName": "T. F. Campbell",
"affiliation": "Callier Center for Commun. Disorders, Univ. of Texas at Dallas, Richardson, TX, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "811-823",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/grc/2011/0372/0/06122581",
"title": "A new tongue model based on muscle-control",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2011/06122581/12OmNA0MZ1l",
"parentPublication": {
"id": "proceedings/grc/2011/0372/0",
"title": "2011 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsee/2012/4647/2/4647b646",
"title": "A Prior Knowledge-Based Algorithm for Tongue Body Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647b646/12OmNrAv3CY",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/miar/2001/1113/0/11130125",
"title": "Realistic Deformable Models for Simulating the Tongue during Laryngoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/miar/2001/11130125/12OmNrMZpoL",
"parentPublication": {
"id": "proceedings/miar/2001/1113/0",
"title": "Medical Imaging and Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892238",
"title": "Acoustic VR in the mouth: A real-time speech-driven visual tongue system",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892238/12OmNvmowOB",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761651",
"title": "Tongue line extraction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761651/12OmNx0RIKz",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2012/2746/0/06470316",
"title": "Features for automated tongue image shape classification",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2012/06470316/12OmNySosJ0",
"parentPublication": {
"id": "proceedings/bibmw/2012/2746/0",
"title": "2012 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890595",
"title": "Modeling a realistic 3D physiological tongue for visual speech synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890595/12OmNyuy9UE",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1994/6240/0/00324008",
"title": "Modeling and animating the human tongue during speech production",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1994/00324008/12OmNzcPAMk",
"parentPublication": {
"id": "proceedings/ca/1994/6240/0",
"title": "Proceedings of Computer Animation '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545185",
"title": "Reducing Tongue Shape Dimensionality from Hundreds of Available Resources Using Autoencoder",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545185/17D45WIXbQ2",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/taai/2018/1229/0/122900a014",
"title": "Tongue Fissure Visualization with Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/taai/2018/122900a014/17D45X2fUEP",
"parentPublication": {
"id": "proceedings/taai/2018/1229/0",
"title": "2018 Conference on Technologies and Applications of Artificial Intelligence (TAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050799",
"articleId": "13rRUB7a111",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050824",
"articleId": "13rRUwI5Ug6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRFf",
"name": "ttg2013050811s.wmv",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013050811s.wmv",
"extension": "wmv",
"size": "42.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwI5Ug6",
"doi": "10.1109/TVCG.2012.148",
"abstract": "We present PoseShop - a pipeline to construct segmented human image database with minimal manual intervention. By downloading, analyzing, and filtering massive amounts of human images from the Internet, we achieve a database which contains 400 thousands human figures that are segmented out of their background. The human figures are organized based on action semantic, clothes attributes, and indexed by the shape of their poses. They can be queried using either silhouette sketch or a skeleton to find a given pose. We demonstrate applications for this database for multiframe personalized content synthesis in the form of comic-strips, where the main character is the user or his/her friends. We address the two challenges of such synthesis, namely personalization and consistency over a set of frames, by introducing head swapping and clothes swapping techniques. We also demonstrate an action correlation analysis application to show the usefulness of the database for vision application.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present PoseShop - a pipeline to construct segmented human image database with minimal manual intervention. By downloading, analyzing, and filtering massive amounts of human images from the Internet, we achieve a database which contains 400 thousands human figures that are segmented out of their background. The human figures are organized based on action semantic, clothes attributes, and indexed by the shape of their poses. They can be queried using either silhouette sketch or a skeleton to find a given pose. We demonstrate applications for this database for multiframe personalized content synthesis in the form of comic-strips, where the main character is the user or his/her friends. We address the two challenges of such synthesis, namely personalization and consistency over a set of frames, by introducing head swapping and clothes swapping techniques. We also demonstrate an action correlation analysis application to show the usefulness of the database for vision application.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present PoseShop - a pipeline to construct segmented human image database with minimal manual intervention. By downloading, analyzing, and filtering massive amounts of human images from the Internet, we achieve a database which contains 400 thousands human figures that are segmented out of their background. The human figures are organized based on action semantic, clothes attributes, and indexed by the shape of their poses. They can be queried using either silhouette sketch or a skeleton to find a given pose. We demonstrate applications for this database for multiframe personalized content synthesis in the form of comic-strips, where the main character is the user or his/her friends. We address the two challenges of such synthesis, namely personalization and consistency over a set of frames, by introducing head swapping and clothes swapping techniques. We also demonstrate an action correlation analysis application to show the usefulness of the database for vision application.",
"title": "PoseShop: Human Image Database Construction and Personalized Content Synthesis",
"normalizedTitle": "PoseShop: Human Image Database Construction and Personalized Content Synthesis",
"fno": "ttg2013050824",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Humans",
"Skin",
"Image Segmentation",
"Image Databases",
"Image Color Analysis",
"Shape",
"Image Composition",
"Image Database"
],
"authors": [
{
"givenName": null,
"surname": "Tao Chen",
"fullName": "Tao Chen",
"affiliation": "Dept. of Comput. Sci., Tsinghua Univ., Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Ping Tan",
"fullName": "Ping Tan",
"affiliation": "Dept. of Electr. & Comput. Eng., Nat. Univ. of Singapore, Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Li-Qian Ma",
"fullName": "Li-Qian Ma",
"affiliation": "Dept. of Comput. Sci., Tsinghua Univ., Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Ming-Ming Cheng",
"fullName": "Ming-Ming Cheng",
"affiliation": "Dept. of Comput. Sci., Tsinghua Univ., Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Shamir",
"fullName": "A. Shamir",
"affiliation": "Efi Arazi Sch. of Comput. Sci., Interdiscipl. Center, Herzelia, Israel",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Shi-Min Hu",
"fullName": "Shi-Min Hu",
"affiliation": "Dept. of Comput. Sci., Tsinghua Univ., Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "824-837",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/mnrao/1994/6435/0/00346252",
"title": "Lower limb kinematics of human walking with the medial axis transformation",
"doi": null,
"abstractUrl": "/proceedings-article/mnrao/1994/00346252/12OmNBtl1sY",
"parentPublication": {
"id": "proceedings/mnrao/1994/6435/0",
"title": "Proceedings of 1994 IEEE Workshop on Motion of Non-rigid and Articulated Objects",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/enc/2009/3882/0/3882a127",
"title": "Query an Image Database by Segmentation and Content",
"doi": null,
"abstractUrl": "/proceedings-article/enc/2009/3882a127/12OmNBziB91",
"parentPublication": {
"id": "proceedings/enc/2009/3882/0",
"title": "2009 Mexican International Conference on Computer Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2015/9721/0/9721a663",
"title": "Fast Face Detection Based on Skin Segmentation and Facial Features",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2015/9721a663/12OmNrJAdRd",
"parentPublication": {
"id": "proceedings/sitis/2015/9721/0",
"title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2009/3813/0/3813a245",
"title": "A Study of the Effect of Illumination Conditions and Color Spaces on Skin Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2009/3813a245/12OmNrkT7FX",
"parentPublication": {
"id": "proceedings/sibgrapi/2009/3813/0",
"title": "2009 XXII Brazilian Symposium on Computer Graphics and Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130379",
"title": "RGBD-HuDaAct: A color-depth video database for human daily activity recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130379/12OmNyPQ4GV",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460590",
"title": "Combining contrast saliency and region discontinuity for precise hand segmentation in projector-camera system",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460590/12OmNzVoBFM",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fcst/2010/7779/0/05577329",
"title": "Skin-Anatomy Based Face Texture Image Synthesis by Skin Feature Distribution Analyzing Method",
"doi": null,
"abstractUrl": "/proceedings-article/fcst/2010/05577329/12OmNzzxuta",
"parentPublication": {
"id": "proceedings/fcst/2010/7779/0",
"title": "2010 Fifth International Conference on Frontier of Computer Science and Technology (FCST 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a471",
"title": "Garment Detectives: Discovering Clothes and Its Genre in Consumer Photos",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a471/19wB1fUNWGk",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2022/1015/0/101500a180",
"title": "Personalized Custom Virtual Fitting Display Method",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2022/101500a180/1M4rv5Veb0k",
"parentPublication": {
"id": "proceedings/itme/2022/1015/0",
"title": "2022 12th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h847",
"title": "Towards Photo-Realistic Virtual Try-On by Adaptively Generating↔Preserving Image Content",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h847/1m3nGrL41va",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050811",
"articleId": "13rRUNvgziC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050838",
"articleId": "13rRUxC0SvU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgD9",
"name": "ttg2013050824s.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013050824s.avi",
"extension": "avi",
"size": "50.8 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXFgDa",
"name": "ttg2013050824s.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013050824s.pdf",
"extension": "pdf",
"size": "7.96 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxC0SvU",
"doi": "10.1109/TVCG.2012.159",
"abstract": "The perception of transparency and the underlying neural mechanisms have been subject to extensive research in the cognitive sciences. However, we have yet to develop visualization techniques that optimally convey the inner structure of complex transparent shapes. In this paper, we apply the findings of perception research to develop a novel illustrative rendering method that enhances surface transparency nonlocally. Rendering of transparent geometry is computationally expensive since many optimizations, such as visibility culling, are not applicable and fragments have to be sorted by depth for correct blending. In order to overcome these difficulties efficiently, we propose the illustration buffer. This novel data structure combines the ideas of the A and G-buffers to store a list of all surface layers for each pixel. A set of local and nonlocal operators is then used to process these depth-lists to generate the final image. Our technique is interactive on current graphics hardware and is only limited by the available graphics memory. Based on this framework, we present an efficient algorithm for a nonlocal transparency enhancement that creates expressive renderings of transparent surfaces. A controlled quantitative double blind user study shows that the presented approach improves the understanding of complex transparent surfaces significantly.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The perception of transparency and the underlying neural mechanisms have been subject to extensive research in the cognitive sciences. However, we have yet to develop visualization techniques that optimally convey the inner structure of complex transparent shapes. In this paper, we apply the findings of perception research to develop a novel illustrative rendering method that enhances surface transparency nonlocally. Rendering of transparent geometry is computationally expensive since many optimizations, such as visibility culling, are not applicable and fragments have to be sorted by depth for correct blending. In order to overcome these difficulties efficiently, we propose the illustration buffer. This novel data structure combines the ideas of the A and G-buffers to store a list of all surface layers for each pixel. A set of local and nonlocal operators is then used to process these depth-lists to generate the final image. Our technique is interactive on current graphics hardware and is only limited by the available graphics memory. Based on this framework, we present an efficient algorithm for a nonlocal transparency enhancement that creates expressive renderings of transparent surfaces. A controlled quantitative double blind user study shows that the presented approach improves the understanding of complex transparent surfaces significantly.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The perception of transparency and the underlying neural mechanisms have been subject to extensive research in the cognitive sciences. However, we have yet to develop visualization techniques that optimally convey the inner structure of complex transparent shapes. In this paper, we apply the findings of perception research to develop a novel illustrative rendering method that enhances surface transparency nonlocally. Rendering of transparent geometry is computationally expensive since many optimizations, such as visibility culling, are not applicable and fragments have to be sorted by depth for correct blending. In order to overcome these difficulties efficiently, we propose the illustration buffer. This novel data structure combines the ideas of the A and G-buffers to store a list of all surface layers for each pixel. A set of local and nonlocal operators is then used to process these depth-lists to generate the final image. Our technique is interactive on current graphics hardware and is only limited by the available graphics memory. Based on this framework, we present an efficient algorithm for a nonlocal transparency enhancement that creates expressive renderings of transparent surfaces. A controlled quantitative double blind user study shows that the presented approach improves the understanding of complex transparent surfaces significantly.",
"title": "Smart Transparency for Illustrative Visualization of Complex Flow Surfaces",
"normalizedTitle": "Smart Transparency for Illustrative Visualization of Complex Flow Surfaces",
"fno": "ttg2013050838",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Buffer Storage",
"Image Color Analysis",
"Surface Treatment",
"Indexes",
"Graphics Processing Unit",
"Perception",
"Illustrative Rendering",
"Transparency",
"Flow Visualization",
"Integral Surface",
"User Study",
"Diffusion",
"A Buffer",
"Illustration Buffer"
],
"authors": [
{
"givenName": "R.",
"surname": "Carnecky",
"fullName": "R. Carnecky",
"affiliation": "Comput. Sci. Dept., ETH Zurich, Zurich, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "R.",
"surname": "Fuchs",
"fullName": "R. Fuchs",
"affiliation": "Comput. Sci. Dept., ETH Zurich, Zurich, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "S.",
"surname": "Mehl",
"fullName": "S. Mehl",
"affiliation": "Fac. of Med., Phillips-Univ. of Marburg, Marburg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Yun Jang",
"fullName": "Yun Jang",
"affiliation": "Dept. of Comput. Eng., Sejong Univ., Seoul, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "R.",
"surname": "Peikert",
"fullName": "R. Peikert",
"affiliation": "Comput. Sci. Dept., ETH Zurich, Zurich, Switzerland",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "838-851",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2005/2766/0/01532855",
"title": "Illustrative display of hidden iso-surface structures",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532855/12OmNBRsVxy",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2012/4829/0/4829a134",
"title": "Memory-Efficient Order-Independent Transparency with Dynamic Fragment Buffer",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2012/4829a134/12OmNCctfoC",
"parentPublication": {
"id": "proceedings/sibgrapi/2012/4829/0",
"title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061319",
"title": "IRIS: Illustrative Rendering for Integral Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061319/13rRUIJcWlk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/08/07055322",
"title": "Disambiguating Stereoscopic Transparency Using a Thaumatrope Approach",
"doi": null,
"abstractUrl": "/journal/tg/2015/08/07055322/13rRUwghd53",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/08/ttg2011081036",
"title": "Stochastic Transparency",
"doi": null,
"abstractUrl": "/journal/tg/2011/08/ttg2011081036/13rRUxBa55X",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061329",
"title": "Illustrative Stream Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061329/13rRUxcsYLM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/02/ttg2014020238",
"title": "Memory-Hazard-Aware K-Buffer Algorithm for Order-Independent Transparency Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2014/02/ttg2014020238/13rRUypp57F",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1988/03/mcg1988030029",
"title": "Display of Surfaces from Volume Data",
"doi": null,
"abstractUrl": "/magazine/cg/1988/03/mcg1988030029/13rRUyuNszl",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/08/09007507",
"title": "A Comparison of Rendering Techniques for 3D Line Sets With Transparency",
"doi": null,
"abstractUrl": "/journal/tg/2021/08/09007507/1hJKlGGBnpu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09216549",
"title": "Advanced Rendering of Line Data with Ambient Occlusion and Transparency",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09216549/1nJsKPg3YJy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050824",
"articleId": "13rRUwI5Ug6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050852",
"articleId": "13rRUwfZBVm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesQz",
"name": "ttg2013050838s.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013050838s.pdf",
"extension": "pdf",
"size": "2.47 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTYesQA",
"name": "ttg2013050838s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013050838s.mp4",
"extension": "mp4",
"size": "16.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwfZBVm",
"doi": "10.1109/TVCG.2012.162",
"abstract": "A new type of deformable model is presented that merges meshes and level sets into one representation to provide interoperability between methods designed for either. This includes the ability to circumvent the CFL time step restriction for methods that require large step sizes. The key idea is to couple a constellation of disconnected triangular surface elements (springls) with a level set that tracks the moving constellation. The target application for Spring Level Sets (SpringLS) is to implement comprehensive imaging pipelines that require a mixture of deformable model representations to achieve the best performance. We demonstrate how to implement key components of a comprehensive imaging pipeline with SpringLS, including image segmentation, registration, tracking, and atlasing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A new type of deformable model is presented that merges meshes and level sets into one representation to provide interoperability between methods designed for either. This includes the ability to circumvent the CFL time step restriction for methods that require large step sizes. The key idea is to couple a constellation of disconnected triangular surface elements (springls) with a level set that tracks the moving constellation. The target application for Spring Level Sets (SpringLS) is to implement comprehensive imaging pipelines that require a mixture of deformable model representations to achieve the best performance. We demonstrate how to implement key components of a comprehensive imaging pipeline with SpringLS, including image segmentation, registration, tracking, and atlasing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A new type of deformable model is presented that merges meshes and level sets into one representation to provide interoperability between methods designed for either. This includes the ability to circumvent the CFL time step restriction for methods that require large step sizes. The key idea is to couple a constellation of disconnected triangular surface elements (springls) with a level set that tracks the moving constellation. The target application for Spring Level Sets (SpringLS) is to implement comprehensive imaging pipelines that require a mixture of deformable model representations to achieve the best performance. We demonstrate how to implement key components of a comprehensive imaging pipeline with SpringLS, including image segmentation, registration, tracking, and atlasing.",
"title": "Spring Level Sets: A Deformable Model Representation to Provide Interoperability between Meshes and Level Sets",
"normalizedTitle": "Spring Level Sets: A Deformable Model Representation to Provide Interoperability between Meshes and Level Sets",
"fno": "ttg2013050852",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Level Set",
"Deformable Models",
"Springs",
"Image Segmentation",
"Imaging",
"Materials",
"Computational Modeling",
"Shape Model",
"Segmentation",
"Registration",
"Tracking",
"Atlas"
],
"authors": [
{
"givenName": "B. C.",
"surname": "Lucas",
"fullName": "B. C. Lucas",
"affiliation": "Dept. of Comput. Sci., Johns Hopkins Univ., Baltimore, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Kazhdan",
"fullName": "M. Kazhdan",
"affiliation": "Dept. of Comput. Sci., Johns Hopkins Univ., Baltimore, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "R. H.",
"surname": "Taylor",
"fullName": "R. H. Taylor",
"affiliation": "Dept. of Comput. Sci., Johns Hopkins Univ., Baltimore, MD, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "852-865",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032f496",
"title": "Non-rigid Object Tracking via Deformable Patches Using Shape-Preserved KCF and Level Sets",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032f496/12OmNBqv2pP",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2014/4435/0/4435a113",
"title": "Patient-Specific Interactive Simulation of Compression Ultrasonography",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2014/4435a113/12OmNqIhFK6",
"parentPublication": {
"id": "proceedings/cbms/2014/4435/0",
"title": "2014 IEEE 27th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2008/2242/0/04587475",
"title": "A multi-compartment segmentation framework with homeomorphic level sets",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2008/04587475/12OmNwI8cha",
"parentPublication": {
"id": "proceedings/cvpr/2008/2242/0",
"title": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b002",
"title": "Multiphase Image Segmentation Using the Deformable Simplicial Complex Method",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b002/12OmNxWLTqW",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/04/v0422",
"title": "A Streaming Narrow-Band Algorithm: Interactive Computation and Visualization of Level Sets",
"doi": null,
"abstractUrl": "/journal/tg/2004/04/v0422/13rRUwI5UfT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/01/ttg2013010030",
"title": "Fast Sparse Level Sets on Graphics Hardware",
"doi": null,
"abstractUrl": "/journal/tg/2013/01/ttg2013010030/13rRUwfZC0g",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2006/08/i1262",
"title": "Dynamical Statistical Shape Priors for Level Set-Based Tracking",
"doi": null,
"abstractUrl": "/journal/tp/2006/08/i1262/13rRUxBrGi0",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/02/v0173",
"title": "A Level-Set Approach for the Metamorphosis of Solid Models",
"doi": null,
"abstractUrl": "/journal/tg/2001/02/v0173/13rRUy2YLYj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2003/06/i0755",
"title": "A Topology Preserving Level Set Method for Geometric Deformable Models",
"doi": null,
"abstractUrl": "/journal/tp/2003/06/i0755/13rRUyuvRpS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050838",
"articleId": "13rRUxC0SvU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050866",
"articleId": "13rRUxBa561",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa561",
"doi": "10.1109/TVCG.2012.160",
"abstract": "This paper surveys the field of nonphotorealistic rendering (NPR), focusing on techniques for transforming 2D input (images and video) into artistically stylized renderings. We first present a taxonomy of the 2D NPR algorithms developed over the past two decades, structured according to the design characteristics and behavior of each technique. We then describe a chronology of development from the semiautomatic paint systems of the early nineties, through to the automated painterly rendering systems of the late nineties driven by image gradient analysis. Two complementary trends in the NPR literature are then addressed, with reference to our taxonomy. First, the fusion of higher level computer vision and NPR, illustrating the trends toward scene analysis to drive artistic abstraction and diversity of style. Second, the evolution of local processing approaches toward edge-aware filtering for real-time stylization of images and video. The survey then concludes with a discussion of open challenges for 2D NPR identified in recent NPR symposia, including topics such as user and aesthetic evaluation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper surveys the field of nonphotorealistic rendering (NPR), focusing on techniques for transforming 2D input (images and video) into artistically stylized renderings. We first present a taxonomy of the 2D NPR algorithms developed over the past two decades, structured according to the design characteristics and behavior of each technique. We then describe a chronology of development from the semiautomatic paint systems of the early nineties, through to the automated painterly rendering systems of the late nineties driven by image gradient analysis. Two complementary trends in the NPR literature are then addressed, with reference to our taxonomy. First, the fusion of higher level computer vision and NPR, illustrating the trends toward scene analysis to drive artistic abstraction and diversity of style. Second, the evolution of local processing approaches toward edge-aware filtering for real-time stylization of images and video. The survey then concludes with a discussion of open challenges for 2D NPR identified in recent NPR symposia, including topics such as user and aesthetic evaluation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper surveys the field of nonphotorealistic rendering (NPR), focusing on techniques for transforming 2D input (images and video) into artistically stylized renderings. We first present a taxonomy of the 2D NPR algorithms developed over the past two decades, structured according to the design characteristics and behavior of each technique. We then describe a chronology of development from the semiautomatic paint systems of the early nineties, through to the automated painterly rendering systems of the late nineties driven by image gradient analysis. Two complementary trends in the NPR literature are then addressed, with reference to our taxonomy. First, the fusion of higher level computer vision and NPR, illustrating the trends toward scene analysis to drive artistic abstraction and diversity of style. Second, the evolution of local processing approaches toward edge-aware filtering for real-time stylization of images and video. The survey then concludes with a discussion of open challenges for 2D NPR identified in recent NPR symposia, including topics such as user and aesthetic evaluation.",
"title": "State of the \"Art\": A Taxonomy of Artistic Stylization Techniques for Images and Video",
"normalizedTitle": "State of the \"Art\": A Taxonomy of Artistic Stylization Techniques for Images and Video",
"fno": "ttg2013050866",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Painting",
"Image Color Analysis",
"Taxonomy",
"Algorithm Design And Analysis",
"Image Edge Detection",
"Artistic Rendering",
"Image And Video Stylization",
"Nonphotorealistic Rendering NPR"
],
"authors": [
{
"givenName": "Jan Eric",
"surname": "Kyprianidis",
"fullName": "Jan Eric Kyprianidis",
"affiliation": "Comput. Graphics Syst. Group, Univ. of Potsdam, Potsdam, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J.",
"surname": "Collomosse",
"fullName": "J. Collomosse",
"affiliation": "Centre for Vision Speech & Signal Process. (CVSSP), Univ. of Surrey, Guildford, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Tinghuai Wang",
"fullName": "Tinghuai Wang",
"affiliation": "Centre for Vision Speech & Signal Process. (CVSSP), Univ. of Surrey, Guildford, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "T.",
"surname": "Isenberg",
"fullName": "T. Isenberg",
"affiliation": "Team Aviz-INRIA-Saclay, Univ. Paris-Sud, Orsay, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "866-885",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2005/2397/0/23970903",
"title": "Enhanced SIC (Synergistic Image Creator) for Artistic Use",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2005/23970903/12OmNB1wkOc",
"parentPublication": {
"id": "proceedings/iv/2005/2397/0",
"title": "Ninth International Conference on Information Visualisation (IV'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2013/2549/0/06746488",
"title": "Wormhole Canvas",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2013/06746488/12OmNBqMDEL",
"parentPublication": {
"id": "proceedings/cis/2013/2549/0",
"title": "2013 Ninth International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2016/9041/0/9041a391",
"title": "Best Practices in WebQuest Design: Stimulating the Higher Levels of Bloom's Taxonomy",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2016/9041a391/12OmNCeaPUM",
"parentPublication": {
"id": "proceedings/icalt/2016/9041/0",
"title": "2016 IEEE 16th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsc/2018/4210/0/421001a837",
"title": "Automatic Taxonomy Construction for Eye Colors Data without Using Context Information",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2018/421001a837/12OmNviZlCk",
"parentPublication": {
"id": "proceedings/dsc/2018/4210/0",
"title": "2018 IEEE Third International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvmp/2009/3893/0/3893a035",
"title": "Skin-Aware Stylization of Video Portraits",
"doi": null,
"abstractUrl": "/proceedings-article/cvmp/2009/3893a035/12OmNwMob6P",
"parentPublication": {
"id": "proceedings/cvmp/2009/3893/0",
"title": "2009 Conference for Visual Media Production",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imf/2009/3807/0/3807a054",
"title": "From the Computer Incident Taxonomy to a Computer Forensic Examination Taxonomy",
"doi": null,
"abstractUrl": "/proceedings-article/imf/2009/3807a054/12OmNyvGyk7",
"parentPublication": {
"id": "proceedings/imf/2009/3807/0",
"title": "2009 Fifth International Conference on IT Security Incident Management and IT Forensics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scivis/2015/9785/0/07429511",
"title": "A proposed multivariate visualization taxonomy from user data",
"doi": null,
"abstractUrl": "/proceedings-article/scivis/2015/07429511/12OmNzmLxKh",
"parentPublication": {
"id": "proceedings/scivis/2015/9785/0",
"title": "2015 IEEE Scientific Visualization Conference (SciVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/08/ttg2013081252",
"title": "Abstract Art by Shape Classification",
"doi": null,
"abstractUrl": "/journal/tg/2013/08/ttg2013081252/13rRUxYrbMf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/10/06732968",
"title": "Towards Photo Watercolorization with Artistic Verisimilitude",
"doi": null,
"abstractUrl": "/journal/tg/2014/10/06732968/13rRUxYrbUI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/03/mcg2013030022",
"title": "Mobile Expressive Renderings: The State of the Art",
"doi": null,
"abstractUrl": "/magazine/cg/2013/03/mcg2013030022/13rRUy0ZzV1",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050852",
"articleId": "13rRUwfZBVm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013050886",
"articleId": "13rRUwh80uy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzvhvFS",
"title": "May",
"year": "2013",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwh80uy",
"doi": "10.1109/TVCG.2012.163",
"abstract": "Spatial judgments are important for many real-world tasks in engineering and scientific visualization. While existing research provides evidence that higher levels of display and interaction fidelity in virtual reality systems offer advantages for spatial understanding, few investigations have focused on small-scale spatial judgments or employed experimental tasks similar to those used in real-world applications. After an earlier study that considered a broad analysis of various spatial understanding tasks, we present the results of a follow-up study focusing on small-scale spatial judgments. In this research, we independently controlled field of regard, stereoscopy, and head-tracked rendering to study their effects on the performance of a task involving precise spatial inspections of complex 3D structures. Measuring time and errors, we asked participants to distinguish between structural gaps and intersections between components of 3D models designed to be similar to real underground cave systems. The overall results suggest that the addition of the higher fidelity system features support performance improvements in making small-scale spatial judgments. Through analyses of the effects of individual system components, the experiment shows that participants made significantly fewer errors with either an increased field of regard or with the addition of head-tracked rendering. The results also indicate that participants performed significantly faster when the system provided the combination of stereo and head-tracked rendering.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Spatial judgments are important for many real-world tasks in engineering and scientific visualization. While existing research provides evidence that higher levels of display and interaction fidelity in virtual reality systems offer advantages for spatial understanding, few investigations have focused on small-scale spatial judgments or employed experimental tasks similar to those used in real-world applications. After an earlier study that considered a broad analysis of various spatial understanding tasks, we present the results of a follow-up study focusing on small-scale spatial judgments. In this research, we independently controlled field of regard, stereoscopy, and head-tracked rendering to study their effects on the performance of a task involving precise spatial inspections of complex 3D structures. Measuring time and errors, we asked participants to distinguish between structural gaps and intersections between components of 3D models designed to be similar to real underground cave systems. The overall results suggest that the addition of the higher fidelity system features support performance improvements in making small-scale spatial judgments. Through analyses of the effects of individual system components, the experiment shows that participants made significantly fewer errors with either an increased field of regard or with the addition of head-tracked rendering. The results also indicate that participants performed significantly faster when the system provided the combination of stereo and head-tracked rendering.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Spatial judgments are important for many real-world tasks in engineering and scientific visualization. While existing research provides evidence that higher levels of display and interaction fidelity in virtual reality systems offer advantages for spatial understanding, few investigations have focused on small-scale spatial judgments or employed experimental tasks similar to those used in real-world applications. After an earlier study that considered a broad analysis of various spatial understanding tasks, we present the results of a follow-up study focusing on small-scale spatial judgments. In this research, we independently controlled field of regard, stereoscopy, and head-tracked rendering to study their effects on the performance of a task involving precise spatial inspections of complex 3D structures. Measuring time and errors, we asked participants to distinguish between structural gaps and intersections between components of 3D models designed to be similar to real underground cave systems. The overall results suggest that the addition of the higher fidelity system features support performance improvements in making small-scale spatial judgments. Through analyses of the effects of individual system components, the experiment shows that participants made significantly fewer errors with either an increased field of regard or with the addition of head-tracked rendering. The results also indicate that participants performed significantly faster when the system provided the combination of stereo and head-tracked rendering.",
"title": "Studying the Effects of Stereo, Head Tracking, and Field of Regard on a Small-Scale Spatial Judgment Task",
"normalizedTitle": "Studying the Effects of Stereo, Head Tracking, and Field of Regard on a Small-Scale Spatial Judgment Task",
"fno": "ttg2013050886",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Electron Tubes",
"Navigation",
"Data Visualization",
"Head",
"Tracking",
"Rendering Computer Graphics",
"Graphical User Interfaces",
"Artificial",
"Augmented",
"And Virtual Realities"
],
"authors": [
{
"givenName": "E. D.",
"surname": "Ragan",
"fullName": "E. D. Ragan",
"affiliation": "Dept. of Comput. Sci., Virginia Tech, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "R.",
"surname": "Kopper",
"fullName": "R. Kopper",
"affiliation": "Dept. of Comput. & Inf. Sci. & Eng., Univ. of Florida, Gainesville, FL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "P.",
"surname": "Schuchardt",
"fullName": "P. Schuchardt",
"affiliation": "Cavewhere, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "D. A.",
"surname": "Bowman",
"fullName": "D. A. Bowman",
"affiliation": "Dept. of Comput. Sci., Virginia Tech, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-05-01 00:00:00",
"pubType": "trans",
"pages": "886-896",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2012/1204/0/06184187",
"title": "Democratizing rendering for multiple viewers in surround VR systems",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2012/06184187/12OmNBubOX9",
"parentPublication": {
"id": "proceedings/3dui/2012/1204/0",
"title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04810998",
"title": "Image Blending and View Clustering for Multi-Viewer Immersive Projection Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04810998/12OmNCfSqFi",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2016/7258/0/07552858",
"title": "Depth augmented stereo panorama for cinematic virtual reality with head-motion parallax",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2016/07552858/12OmNs0TKW6",
"parentPublication": {
"id": "proceedings/icme/2016/7258/0",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2012/4683/0/4683a125",
"title": "Coarse Head Pose Estimation using Image Abstraction",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2012/4683a125/12OmNwE9ORM",
"parentPublication": {
"id": "proceedings/crv/2012/4683/0",
"title": "2012 Ninth Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671800",
"title": "Subtle cueing for visual search in head-tracked head worn displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671800/12OmNylbovt",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07384528",
"title": "Visual Quality Adjustment for Volume Rendering in a Head-Tracked Virtual Environment",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07384528/13rRUxBrGh4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08643434",
"title": "SGaze: A Data-Driven Eye-Head Coordination Model for Realtime Gaze Prediction",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08643434/18K0lRIKi7m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a640",
"title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0342",
"title": "HeadNeRF: A Realtime NeRF-based Parametric Head Model",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0342/1H1hITKdHGg",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090625",
"title": "Automatic Calibration of Commercial Optical See-Through Head-Mounted Displays for Medical Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090625/1jIxwp2g0VO",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013050866",
"articleId": "13rRUxBa561",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyY28Yh",
"doi": "10.1109/TVCG.2004.1304817",
"abstract": "Presents the table of contents for this issue of the periodical.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents the table of contents for this issue of the periodical.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents the table of contents for this issue of the periodical.",
"title": "IEEE Transactions on Visualization and Computer Graphics - Table of contents",
"normalizedTitle": "IEEE Transactions on Visualization and Computer Graphics - Table of contents",
"fno": "01304817",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "C1",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "01304818",
"articleId": "13rRUy0qnLx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUy0qnLx",
"doi": "10.1109/TVCG.2004.1304818",
"abstract": "Provides a listing of current staff, committee members and society officers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Provides a listing of current staff, committee members and society officers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Provides a listing of current staff, committee members and society officers.",
"title": "IEEE Transactions on Visualization and Computer Graphics",
"normalizedTitle": "IEEE Transactions on Visualization and Computer Graphics",
"fno": "01304818",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "C2",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "01304817",
"articleId": "13rRUyY28Yh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0001",
"articleId": "13rRUxjQyhk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxjQyhk",
"doi": "10.1109/TVCG.2004.1260753",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Editor's Note",
"normalizedTitle": "Editor's Note",
"fno": "v0001",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "David S.",
"surname": "Ebert",
"fullName": "David S. Ebert",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "1",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2009/01/ttg2009010001",
"title": "Editor's Note",
"doi": null,
"abstractUrl": "/journal/tg/2009/01/ttg2009010001/13rRUEgs2BP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/1999/04/e0433",
"title": "Editor's Note",
"doi": null,
"abstractUrl": "/journal/ts/1999/04/e0433/13rRUIM2VIE",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2010/01/ttb2010010001",
"title": "Editor's Note",
"doi": null,
"abstractUrl": "/journal/tb/2010/01/ttb2010010001/13rRUNvyarQ",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1997/11/t1153",
"title": "Editor's Note",
"doi": null,
"abstractUrl": "/journal/tc/1997/11/t1153/13rRUwI5UjW",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2008/01/ttd2008010001",
"title": "Editor's Note",
"doi": null,
"abstractUrl": "/journal/td/2008/01/ttd2008010001/13rRUwInvAC",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2004/03/01264805",
"title": "Editor's note",
"doi": null,
"abstractUrl": "/journal/td/2004/03/01264805/13rRUwbs1S1",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2003/03/h0185",
"title": "Editor's Note",
"doi": null,
"abstractUrl": "/journal/tm/2003/03/h0185/13rRUx0xPJd",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2007/01/l0001",
"title": "Editor's Note",
"doi": null,
"abstractUrl": "/journal/td/2007/01/l0001/13rRUxAATg8",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/an/1983/02/man1983020161",
"title": "Editor's Note",
"doi": null,
"abstractUrl": "/magazine/an/1983/02/man1983020161/13rRUxASu5y",
"parentPublication": {
"id": "mags/an",
"title": "IEEE Annals of the History of Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2006/05/l0401",
"title": "Editor's Note",
"doi": null,
"abstractUrl": "/journal/td/2006/05/l0401/13rRUyeTVhw",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "01304818",
"articleId": "13rRUy0qnLx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0002",
"articleId": "13rRUwwaKsV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwwaKsV",
"doi": "10.1109/TVCG.2004.1260754",
"abstract": "Abstract—Simulators for dynamic systems are now widely used in various application areas and raise the need for effective and accurate flow visualization techniques. Animation allows us to depict direction, orientation, and velocity of a vector field accurately. This paper extends a former proposal for a new approach to produce perfectly cyclic and variable-speed animations for 2D steady vector fields (see [1] and [2]). A complete animation of an arbitrary number of frames is encoded in a single image. The animation can be played using the color table animation technique, which is very effective even on low-end workstations. A cyclic set of textures can be produced as well and then encoded in a common animation format or used for texture mapping on 3D objects. As compared to other approaches, the method presented in this paper produces smoother animations and is more effective, both in memory requirements to store the animation, and in computation time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Simulators for dynamic systems are now widely used in various application areas and raise the need for effective and accurate flow visualization techniques. Animation allows us to depict direction, orientation, and velocity of a vector field accurately. This paper extends a former proposal for a new approach to produce perfectly cyclic and variable-speed animations for 2D steady vector fields (see [1] and [2]). A complete animation of an arbitrary number of frames is encoded in a single image. The animation can be played using the color table animation technique, which is very effective even on low-end workstations. A cyclic set of textures can be produced as well and then encoded in a common animation format or used for texture mapping on 3D objects. As compared to other approaches, the method presented in this paper produces smoother animations and is more effective, both in memory requirements to store the animation, and in computation time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Simulators for dynamic systems are now widely used in various application areas and raise the need for effective and accurate flow visualization techniques. Animation allows us to depict direction, orientation, and velocity of a vector field accurately. This paper extends a former proposal for a new approach to produce perfectly cyclic and variable-speed animations for 2D steady vector fields (see [1] and [2]). A complete animation of an arbitrary number of frames is encoded in a single image. The animation can be played using the color table animation technique, which is very effective even on low-end workstations. A cyclic set of textures can be produced as well and then encoded in a common animation format or used for texture mapping on 3D objects. As compared to other approaches, the method presented in this paper produces smoother animations and is more effective, both in memory requirements to store the animation, and in computation time.",
"title": "High-Quality Animation of 2D Steady Vector Fields",
"normalizedTitle": "High-Quality Animation of 2D Steady Vector Fields",
"fno": "v0002",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Index Terms Flow Visualization",
"Textured Representations",
"Animation",
"Effective Techniques",
"Multimodal Visualization"
],
"authors": [
{
"givenName": "Wilfrid",
"surname": "Lefer",
"fullName": "Wilfrid Lefer",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bruno",
"surname": "Jobard",
"fullName": "Bruno Jobard",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Claire",
"surname": "Leduc",
"fullName": "Claire Leduc",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "2-14",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0001",
"articleId": "13rRUxjQyhk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0015",
"articleId": "13rRUyYBlgp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYBlgp",
"doi": "10.1109/TVCG.2004.1260755",
"abstract": "Abstract—We present an efficient stereoscopic rendering algorithm supporting interactive navigation through large-scale 3D voxel-based environments. In this algorithm, most of the pixel values of the right image are derived from the left image by a fast 3D warping based on a specific stereoscopic projection geometry. An accelerated volumetric ray casting then fills the remaining gaps in the warped right image. Our algorithm has been parallelized on a multiprocessor by employing effective task partitioning schemes and achieved a high cache coherency and load balancing. We also extend our stereoscopic rendering to include view-dependent shading and transparency effects. We have applied our algorithm in two virtual navigation systems, flythrough over terrain and virtual colonoscopy, and reached interactive stereoscopic rendering rates of more than 10 frames per second on a 16-processor SGI Challenge.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—We present an efficient stereoscopic rendering algorithm supporting interactive navigation through large-scale 3D voxel-based environments. In this algorithm, most of the pixel values of the right image are derived from the left image by a fast 3D warping based on a specific stereoscopic projection geometry. An accelerated volumetric ray casting then fills the remaining gaps in the warped right image. Our algorithm has been parallelized on a multiprocessor by employing effective task partitioning schemes and achieved a high cache coherency and load balancing. We also extend our stereoscopic rendering to include view-dependent shading and transparency effects. We have applied our algorithm in two virtual navigation systems, flythrough over terrain and virtual colonoscopy, and reached interactive stereoscopic rendering rates of more than 10 frames per second on a 16-processor SGI Challenge.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—We present an efficient stereoscopic rendering algorithm supporting interactive navigation through large-scale 3D voxel-based environments. In this algorithm, most of the pixel values of the right image are derived from the left image by a fast 3D warping based on a specific stereoscopic projection geometry. An accelerated volumetric ray casting then fills the remaining gaps in the warped right image. Our algorithm has been parallelized on a multiprocessor by employing effective task partitioning schemes and achieved a high cache coherency and load balancing. We also extend our stereoscopic rendering to include view-dependent shading and transparency effects. We have applied our algorithm in two virtual navigation systems, flythrough over terrain and virtual colonoscopy, and reached interactive stereoscopic rendering rates of more than 10 frames per second on a 16-processor SGI Challenge.",
"title": "Interactive Stereoscopic Rendering of Volumetric Environments",
"normalizedTitle": "Interactive Stereoscopic Rendering of Volumetric Environments",
"fno": "v0015",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"3 D Voxel Based Environment",
"Stereoscopic Rendering",
"Ray Casting",
"3 D Warping",
"Splatting",
"Antialiasing",
"Virtual Flythrough",
"Virtual Colonoscopy"
],
"authors": [
{
"givenName": "Ming",
"surname": "Wan",
"fullName": "Ming Wan",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nan",
"surname": "Zhang",
"fullName": "Nan Zhang",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Arie E.",
"surname": "Kaufman",
"fullName": "Arie E. Kaufman",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "15-28",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0002",
"articleId": "13rRUwwaKsV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0029",
"articleId": "13rRUyY28Yi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyY28Yi",
"doi": "10.1109/TVCG.2004.1260756",
"abstract": "Abstract- In this paper, we address the problem of the efficient visualization of large irregular volume data sets by exploiting a multiresolution model based on tetrahedral meshes. Multiresolution models, also called Level-Of-Detail (LOD) models, allow encoding the whole data set at a virtually continuous range of different resolutions. We have identified a set of queries for extracting meshes at variable resolution from a multiresolution model, based on field values, domain location, or opacity of the transfer function. Such queries allow trading off between resolution and speed in visualization. We define a new compact data structure for encoding a multiresolution tetrahedral mesh built through edge collapses to support selective refinement efficiently and show that such a structure has a storage cost from 3 to 5.5 times lower than standard data structures used for tetrahedral meshes. The data structures and variable resolution queries have been implemented together with state-of-the art visualization techniques in a system for the interactive visualization of three-dimensional scalar fields defined on tetrahedral meshes. Experimental results show that selective refinement queries can support interactive visualization of large data sets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract- In this paper, we address the problem of the efficient visualization of large irregular volume data sets by exploiting a multiresolution model based on tetrahedral meshes. Multiresolution models, also called Level-Of-Detail (LOD) models, allow encoding the whole data set at a virtually continuous range of different resolutions. We have identified a set of queries for extracting meshes at variable resolution from a multiresolution model, based on field values, domain location, or opacity of the transfer function. Such queries allow trading off between resolution and speed in visualization. We define a new compact data structure for encoding a multiresolution tetrahedral mesh built through edge collapses to support selective refinement efficiently and show that such a structure has a storage cost from 3 to 5.5 times lower than standard data structures used for tetrahedral meshes. The data structures and variable resolution queries have been implemented together with state-of-the art visualization techniques in a system for the interactive visualization of three-dimensional scalar fields defined on tetrahedral meshes. Experimental results show that selective refinement queries can support interactive visualization of large data sets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract- In this paper, we address the problem of the efficient visualization of large irregular volume data sets by exploiting a multiresolution model based on tetrahedral meshes. Multiresolution models, also called Level-Of-Detail (LOD) models, allow encoding the whole data set at a virtually continuous range of different resolutions. We have identified a set of queries for extracting meshes at variable resolution from a multiresolution model, based on field values, domain location, or opacity of the transfer function. Such queries allow trading off between resolution and speed in visualization. We define a new compact data structure for encoding a multiresolution tetrahedral mesh built through edge collapses to support selective refinement efficiently and show that such a structure has a storage cost from 3 to 5.5 times lower than standard data structures used for tetrahedral meshes. The data structures and variable resolution queries have been implemented together with state-of-the art visualization techniques in a system for the interactive visualization of three-dimensional scalar fields defined on tetrahedral meshes. Experimental results show that selective refinement queries can support interactive visualization of large data sets.",
"title": "Selective Refinement Queries for Volume Visualization of Unstructured Tetrahedral Meshes",
"normalizedTitle": "Selective Refinement Queries for Volume Visualization of Unstructured Tetrahedral Meshes",
"fno": "v0029",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Unstructured Tetrahedral Meshes",
"Volume Data Visualization",
"Multiresolution Geometric Modeling",
"Selective Refinement"
],
"authors": [
{
"givenName": "Paolo",
"surname": "Cignoni",
"fullName": "Paolo Cignoni",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Leila",
"surname": "De Floriani",
"fullName": "Leila De Floriani",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Paola",
"surname": "Magillo",
"fullName": "Paola Magillo",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Enrico",
"surname": "Puppo",
"fullName": "Enrico Puppo",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Roberto",
"surname": "Scopigno",
"fullName": "Roberto Scopigno",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "29-45",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0015",
"articleId": "13rRUyYBlgp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0046",
"articleId": "13rRUwgQpDf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwgQpDf",
"doi": "10.1109/TVCG.2004.1260757",
"abstract": "Abstract- We present an algorithm for drawing directed graphs which is based on rapidly solving a unique one-dimensional optimization problem for each of the axes. The algorithm results in a clear description of the hierarchy structure of the graph. Nodes are not restricted to lie on fixed horizontal layers, resulting in layouts that convey the symmetries of the graph very naturally. The algorithm can be applied without change to cyclic or acyclic digraphs and even to graphs containing both directed and undirected edges. We also derive a hierarchy index from the input digraph, which quantitatively measures its amount of hierarchy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract- We present an algorithm for drawing directed graphs which is based on rapidly solving a unique one-dimensional optimization problem for each of the axes. The algorithm results in a clear description of the hierarchy structure of the graph. Nodes are not restricted to lie on fixed horizontal layers, resulting in layouts that convey the symmetries of the graph very naturally. The algorithm can be applied without change to cyclic or acyclic digraphs and even to graphs containing both directed and undirected edges. We also derive a hierarchy index from the input digraph, which quantitatively measures its amount of hierarchy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract- We present an algorithm for drawing directed graphs which is based on rapidly solving a unique one-dimensional optimization problem for each of the axes. The algorithm results in a clear description of the hierarchy structure of the graph. Nodes are not restricted to lie on fixed horizontal layers, resulting in layouts that convey the symmetries of the graph very naturally. The algorithm can be applied without change to cyclic or acyclic digraphs and even to graphs containing both directed and undirected edges. We also derive a hierarchy index from the input digraph, which quantitatively measures its amount of hierarchy.",
"title": "Combining Hierarchy and Energy Drawing Directed Graphs",
"normalizedTitle": "Combining Hierarchy and Energy Drawing Directed Graphs",
"fno": "v0046",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Directed Graph Drawing",
"Force Directed Layout",
"Hierarchy Energy",
"Fiedler Vector",
"Minimum Linear Arrangement"
],
"authors": [
{
"givenName": "Liran",
"surname": "Carmel",
"fullName": "Liran Carmel",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "David",
"surname": "Harel",
"fullName": "David Harel",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yehuda",
"surname": "Koren",
"fullName": "Yehuda Koren",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "46-57",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0029",
"articleId": "13rRUyY28Yi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0058",
"articleId": "13rRUwdrdSp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwdrdSp",
"doi": "10.1109/TVCG.2004.1260758",
"abstract": "Abstract-We propose a novel 2D representation for 3D visibility sorting, the Binary-Space-Partitioned Image (BSPI), to accelerate real-time image-based rendering. BSPI is an efficient 2D realization of a 3D BSP tree, which is commonly used in computer graphics for time-critical visibility sorting. Since the overall structure of a BSP tree is encoded in a BSPI, traversing a BSPI is comparable to traversing the corresponding BSP tree. BSPI performs visibility sorting efficiently and accurately in the 2D image space by warping the reference image triangle-by-triangle instead of pixel-by-pixel. Multiple BSPIs can be combined to solve “disocclusion,” when an occluded portion of the scene becomes visible at a novel viewpoint. Our method is highly automatic, including a tensor voting preprocessing step that generates candidate image partition lines for BSPIs, filters the noisy input data by rejecting outliers, and interpolates missing information. Our system has been applied to a variety of real data, including stereo, motion, and range images.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract-We propose a novel 2D representation for 3D visibility sorting, the Binary-Space-Partitioned Image (BSPI), to accelerate real-time image-based rendering. BSPI is an efficient 2D realization of a 3D BSP tree, which is commonly used in computer graphics for time-critical visibility sorting. Since the overall structure of a BSP tree is encoded in a BSPI, traversing a BSPI is comparable to traversing the corresponding BSP tree. BSPI performs visibility sorting efficiently and accurately in the 2D image space by warping the reference image triangle-by-triangle instead of pixel-by-pixel. Multiple BSPIs can be combined to solve “disocclusion,” when an occluded portion of the scene becomes visible at a novel viewpoint. Our method is highly automatic, including a tensor voting preprocessing step that generates candidate image partition lines for BSPIs, filters the noisy input data by rejecting outliers, and interpolates missing information. Our system has been applied to a variety of real data, including stereo, motion, and range images.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract-We propose a novel 2D representation for 3D visibility sorting, the Binary-Space-Partitioned Image (BSPI), to accelerate real-time image-based rendering. BSPI is an efficient 2D realization of a 3D BSP tree, which is commonly used in computer graphics for time-critical visibility sorting. Since the overall structure of a BSP tree is encoded in a BSPI, traversing a BSPI is comparable to traversing the corresponding BSP tree. BSPI performs visibility sorting efficiently and accurately in the 2D image space by warping the reference image triangle-by-triangle instead of pixel-by-pixel. Multiple BSPIs can be combined to solve “disocclusion,” when an occluded portion of the scene becomes visible at a novel viewpoint. Our method is highly automatic, including a tensor voting preprocessing step that generates candidate image partition lines for BSPIs, filters the noisy input data by rejecting outliers, and interpolates missing information. Our system has been applied to a variety of real data, including stereo, motion, and range images.",
"title": "Binary-Space-Partitioned Images for Resolving Image-Based Visibility",
"normalizedTitle": "Binary-Space-Partitioned Images for Resolving Image-Based Visibility",
"fno": "v0058",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visibility Sorting",
"Binary Space Partitioning",
"Image Based Rendering",
"Segmentation"
],
"authors": [
{
"givenName": "Chi-Wing",
"surname": "Fu",
"fullName": "Chi-Wing Fu",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tien-Tsin",
"surname": "Wong",
"fullName": "Tien-Tsin Wong",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wai-Shun",
"surname": "Tong",
"fullName": "Wai-Shun Tong",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chi-Keung",
"surname": "Tang",
"fullName": "Chi-Keung Tang",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andrew J.",
"surname": "Hanson",
"fullName": "Andrew J. Hanson",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "58-71",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0046",
"articleId": "13rRUwgQpDf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0072",
"articleId": "13rRUEgs2BK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUEgs2BK",
"doi": "10.1109/TVCG.2004.1260759",
"abstract": "Abstract—Visualization can provide valuable assistance for data analysis and decision making tasks. However, how people perceive and interact with a visualization tool can strongly influence their understanding of the data as well as the system's usefulness. Human factors therefore contribute significantly to the visualization process and should play an important role in the design and evaluation of visualization tools. Several research initiatives have begun to explore human factors in visualization, particularly in perception-based design. Nonetheless, visualization work involving human factors is in its infancy, and many potentially promising areas have yet to be explored. Therefore, this paper aims to 1) review known methodology for doing human factors research, with specific emphasis on visualization, 2) review current human factors research in visualization to provide a basis for future investigation, and 3) identify promising areas for future research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Visualization can provide valuable assistance for data analysis and decision making tasks. However, how people perceive and interact with a visualization tool can strongly influence their understanding of the data as well as the system's usefulness. Human factors therefore contribute significantly to the visualization process and should play an important role in the design and evaluation of visualization tools. Several research initiatives have begun to explore human factors in visualization, particularly in perception-based design. Nonetheless, visualization work involving human factors is in its infancy, and many potentially promising areas have yet to be explored. Therefore, this paper aims to 1) review known methodology for doing human factors research, with specific emphasis on visualization, 2) review current human factors research in visualization to provide a basis for future investigation, and 3) identify promising areas for future research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Visualization can provide valuable assistance for data analysis and decision making tasks. However, how people perceive and interact with a visualization tool can strongly influence their understanding of the data as well as the system's usefulness. Human factors therefore contribute significantly to the visualization process and should play an important role in the design and evaluation of visualization tools. Several research initiatives have begun to explore human factors in visualization, particularly in perception-based design. Nonetheless, visualization work involving human factors is in its infancy, and many potentially promising areas have yet to be explored. Therefore, this paper aims to 1) review known methodology for doing human factors research, with specific emphasis on visualization, 2) review current human factors research in visualization to provide a basis for future investigation, and 3) identify promising areas for future research.",
"title": "Human Factors in Visualization Research",
"normalizedTitle": "Human Factors in Visualization Research",
"fno": "v0072",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Human Factors",
"Visualization",
"Perception",
"Cognitive Support",
"Methodology"
],
"authors": [
{
"givenName": "Melanie",
"surname": "Tory",
"fullName": "Melanie Tory",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Torsten",
"surname": "M?ller",
"fullName": "Torsten M?ller",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "72-84",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0058",
"articleId": "13rRUwdrdSp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0085",
"articleId": "13rRUxDIth5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxDIth5",
"doi": "10.1109/TVCG.2004.1260760",
"abstract": "Abstract—Hierarchical B-splines have been widely used for shape modeling since their discovery by Forsey and Bartels. In this paper, we present an application of this concept, in the form of free-form deformation, to image registration by matching two images at increasing levels of detail. Results using MRI brain data are presented that demonstrate high degrees of matching while unnecessary distortions are avoided. We compare our results with the nonlinear ICP (Iterative Closest Point) algorithm (used for landmark-based registration) and optical flow (used for intensity-based registration).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Hierarchical B-splines have been widely used for shape modeling since their discovery by Forsey and Bartels. In this paper, we present an application of this concept, in the form of free-form deformation, to image registration by matching two images at increasing levels of detail. Results using MRI brain data are presented that demonstrate high degrees of matching while unnecessary distortions are avoided. We compare our results with the nonlinear ICP (Iterative Closest Point) algorithm (used for landmark-based registration) and optical flow (used for intensity-based registration).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Hierarchical B-splines have been widely used for shape modeling since their discovery by Forsey and Bartels. In this paper, we present an application of this concept, in the form of free-form deformation, to image registration by matching two images at increasing levels of detail. Results using MRI brain data are presented that demonstrate high degrees of matching while unnecessary distortions are avoided. We compare our results with the nonlinear ICP (Iterative Closest Point) algorithm (used for landmark-based registration) and optical flow (used for intensity-based registration).",
"title": "Image Registration Using Hierarchical B-Splines",
"normalizedTitle": "Image Registration Using Hierarchical B-Splines",
"fno": "v0085",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Index Terms Image Registration",
"Free Form Deformation",
"Hierarchical B Splines",
"Scattered Data Approximation",
"Iterative Closest Point",
"Optical Flow"
],
"authors": [
{
"givenName": "Zhiyong",
"surname": "Xie",
"fullName": "Zhiyong Xie",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gerald E.",
"surname": "Farin",
"fullName": "Gerald E. Farin",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "85-94",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0072",
"articleId": "13rRUEgs2BK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0095",
"articleId": "13rRUwI5TXq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwI5TXq",
"doi": "10.1109/TVCG.2004.1260761",
"abstract": "Abstract- Cartograms are a well-known technique for showing geography-related statistical information, such as population demographics and epidemiological data. The basic idea is to distort a map by resizing its regions according to a statistical parameter, but in a way that keeps the map recognizable. In this study, we formally define a family of cartogram drawing problems. We show that even simple variants are unsolvable in the general case. Because the feasible variants are NP-complete, heuristics are needed to solve the problem. Previously proposed solutions suffer from problems with the quality of the generated drawings. For a cartogram to be recognizable, it is important to preserve the global shape or outline of the input map, a requirement that has been overlooked in the past. To address this, our objective function for cartogram drawing includes both global and local shape preservation. To measure the degree of shape preservation, we propose a shape similarity function, which is based on a Fourier transformation of the polygons' curvatures. Also, our application is visualization of dynamic data, for which we need an algorithm that recalculates a cartogram in a few seconds. None of the previous algorithms provides adequate performance with an acceptable level of quality for this application. In this paper, we therefore propose an efficient iterative scanline algorithm to reposition edges while preserving local and global shapes. Scanlines may be generated automatically or entered interactively to guide the optimization process more closely. We apply our algorithm to several example data sets and provide a detailed comparison of the two variants of our algorithm and previous approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract- Cartograms are a well-known technique for showing geography-related statistical information, such as population demographics and epidemiological data. The basic idea is to distort a map by resizing its regions according to a statistical parameter, but in a way that keeps the map recognizable. In this study, we formally define a family of cartogram drawing problems. We show that even simple variants are unsolvable in the general case. Because the feasible variants are NP-complete, heuristics are needed to solve the problem. Previously proposed solutions suffer from problems with the quality of the generated drawings. For a cartogram to be recognizable, it is important to preserve the global shape or outline of the input map, a requirement that has been overlooked in the past. To address this, our objective function for cartogram drawing includes both global and local shape preservation. To measure the degree of shape preservation, we propose a shape similarity function, which is based on a Fourier transformation of the polygons' curvatures. Also, our application is visualization of dynamic data, for which we need an algorithm that recalculates a cartogram in a few seconds. None of the previous algorithms provides adequate performance with an acceptable level of quality for this application. In this paper, we therefore propose an efficient iterative scanline algorithm to reposition edges while preserving local and global shapes. Scanlines may be generated automatically or entered interactively to guide the optimization process more closely. We apply our algorithm to several example data sets and provide a detailed comparison of the two variants of our algorithm and previous approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract- Cartograms are a well-known technique for showing geography-related statistical information, such as population demographics and epidemiological data. The basic idea is to distort a map by resizing its regions according to a statistical parameter, but in a way that keeps the map recognizable. In this study, we formally define a family of cartogram drawing problems. We show that even simple variants are unsolvable in the general case. Because the feasible variants are NP-complete, heuristics are needed to solve the problem. Previously proposed solutions suffer from problems with the quality of the generated drawings. For a cartogram to be recognizable, it is important to preserve the global shape or outline of the input map, a requirement that has been overlooked in the past. To address this, our objective function for cartogram drawing includes both global and local shape preservation. To measure the degree of shape preservation, we propose a shape similarity function, which is based on a Fourier transformation of the polygons' curvatures. Also, our application is visualization of dynamic data, for which we need an algorithm that recalculates a cartogram in a few seconds. None of the previous algorithms provides adequate performance with an acceptable level of quality for this application. In this paper, we therefore propose an efficient iterative scanline algorithm to reposition edges while preserving local and global shapes. Scanlines may be generated automatically or entered interactively to guide the optimization process more closely. We apply our algorithm to several example data sets and provide a detailed comparison of the two variants of our algorithm and previous approaches.",
"title": "CartoDraw: A Fast Algorithm for Generating Contiguous Cartograms",
"normalizedTitle": "CartoDraw: A Fast Algorithm for Generating Contiguous Cartograms",
"fno": "v0095",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Information Visualization",
"Visualization Of Geo Related Information",
"Continuous Cartograms",
"Value By Area Cartograms",
"Visualization And Cartography"
],
"authors": [
{
"givenName": "Daniel A.",
"surname": "Keim",
"fullName": "Daniel A. Keim",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stephen C.",
"surname": "North",
"fullName": "Stephen C. North",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christian",
"surname": "Panse",
"fullName": "Christian Panse",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "95-110",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0085",
"articleId": "13rRUxDIth5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0111",
"articleId": "13rRUygT7mJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNznkK6H",
"title": "January-February",
"year": "2004",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "January-February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygT7mJ",
"doi": "10.1109/TVCG.2004.10000",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Reviewer's List",
"normalizedTitle": "Reviewer's List",
"fno": "v0111",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2004-01-01 00:00:00",
"pubType": "trans",
"pages": "111-112",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0095",
"articleId": "13rRUwI5TXq",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUEgarsI",
"doi": "10.1109/TVCG.2014.4",
"abstract": "IEEE Transactions on Visualization and Computer Graphics (TVCG) has published more papers in 2013 than in any previous year. TVCG continues to be in an excellent state. For the first time, the entire proceedings of IEEE VAST 2013 papers became part of the VIS special issue of TVCG. At the start of October 2013, TVCG had received more than 265 regular submissions, more than last year at the same time. This year we also observed a healthy number of 150 and 402 submissions to the IEEE VR Conference issue and the VIS conference issue that contains the Proceedings of the IEEE Information Visualization, Scientific Visualization, and Visual Analytics Science and Technology 2013 Conferences, respectively. We are expecting a total of nearly 900 submissions to TVCG by the end of 2013. A total of 137 articles were published in the first 10 regular issues with 1,769 printed pages, and the VR and VIS special issues containing 21 and 101 conference papers, respectively. All submissions in both special issues went through a rigorous two-round journalquality review process. Practically all the 2012 papers have also been decided. From the 293 regular submissions (including 20 extended versions of Best Papers from several top venues in graphics and visualization), 76 regular papers and all 20 special section papers were eventually accepted; 86 out of 333 SciVis plus InfoVis conference submissions were published in the VIS special issue. TVCG continues to offer authors a remarkably effi cient processing of submitted manuscripts: The average time from submission to fi rst decision is about three months and the average time from submission to publication as a preprint in the digital library is about seven months. Its 2012 impact factor is 1.895 with the largest number of total publications appeared two years prior. During 2013, the authors of TVCG regular papers were invited to give an oral presentation of their recent work at TVCG���s partner conferences. 
A total of 35 TVCG papers were presented at the IEEE Virtual Reality Conference, ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games, ACM SIGGRAPH/Eurographics Symposium on Computer Animation, Pacifi c Graphics, and IEEE VIS 2013.",
"abstracts": [
{
"abstractType": "Regular",
"content": "IEEE Transactions on Visualization and Computer Graphics (TVCG) has published more papers in 2013 than in any previous year. TVCG continues to be in an excellent state. For the first time, the entire proceedings of IEEE VAST 2013 papers became part of the VIS special issue of TVCG. At the start of October 2013, TVCG had received more than 265 regular submissions, more than last year at the same time. This year we also observed a healthy number of 150 and 402 submissions to the IEEE VR Conference issue and the VIS conference issue that contains the Proceedings of the IEEE Information Visualization, Scientific Visualization, and Visual Analytics Science and Technology 2013 Conferences, respectively. We are expecting a total of nearly 900 submissions to TVCG by the end of 2013. A total of 137 articles were published in the first 10 regular issues with 1,769 printed pages, and the VR and VIS special issues containing 21 and 101 conference papers, respectively. All submissions in both special issues went through a rigorous two-round journalquality review process. Practically all the 2012 papers have also been decided. From the 293 regular submissions (including 20 extended versions of Best Papers from several top venues in graphics and visualization), 76 regular papers and all 20 special section papers were eventually accepted; 86 out of 333 SciVis plus InfoVis conference submissions were published in the VIS special issue. TVCG continues to offer authors a remarkably effi cient processing of submitted manuscripts: The average time from submission to fi rst decision is about three months and the average time from submission to publication as a preprint in the digital library is about seven months. Its 2012 impact factor is 1.895 with the largest number of total publications appeared two years prior. During 2013, the authors of TVCG regular papers were invited to give an oral presentation of their recent work at TVCG���s partner conferences. 
A total of 35 TVCG papers were presented at the IEEE Virtual Reality Conference, ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games, ACM SIGGRAPH/Eurographics Symposium on Computer Animation, Pacifi c Graphics, and IEEE VIS 2013.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "IEEE Transactions on Visualization and Computer Graphics (TVCG) has published more papers in 2013 than in any previous year. TVCG continues to be in an excellent state. For the first time, the entire proceedings of IEEE VAST 2013 papers became part of the VIS special issue of TVCG. At the start of October 2013, TVCG had received more than 265 regular submissions, more than last year at the same time. This year we also observed a healthy number of 150 and 402 submissions to the IEEE VR Conference issue and the VIS conference issue that contains the Proceedings of the IEEE Information Visualization, Scientific Visualization, and Visual Analytics Science and Technology 2013 Conferences, respectively. We are expecting a total of nearly 900 submissions to TVCG by the end of 2013. A total of 137 articles were published in the first 10 regular issues with 1,769 printed pages, and the VR and VIS special issues containing 21 and 101 conference papers, respectively. All submissions in both special issues went through a rigorous two-round journal-quality review process. Practically all the 2012 papers have also been decided. From the 293 regular submissions (including 20 extended versions of Best Papers from several top venues in graphics and visualization), 76 regular papers and all 20 special section papers were eventually accepted; 86 out of 333 SciVis plus InfoVis conference submissions were published in the VIS special issue. TVCG continues to offer authors a remarkably efficient processing of submitted manuscripts: The average time from submission to first decision is about three months and the average time from submission to publication as a preprint in the digital library is about seven months. Its 2012 impact factor is 1.895 with the largest number of total publications appeared two years prior. During 2013, the authors of TVCG regular papers were invited to give an oral presentation of their recent work at TVCG's partner conferences. A total of 35 TVCG papers were presented at the IEEE Virtual Reality Conference, ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games, ACM SIGGRAPH/Eurographics Symposium on Computer Animation, Pacific Graphics, and IEEE VIS 2013.",
"title": "State of the Journal",
"normalizedTitle": "State of the Journal",
"fno": "ttg2014010001",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Ming C.",
"surname": "Lin",
"fullName": "Ming C. Lin",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549334",
"title": "Message from the Program Chairs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549334/12OmNwudQMb",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/01/ttg2013010001",
"title": "Editor's Note",
"doi": null,
"abstractUrl": "/journal/tg/2013/01/ttg2013010001/13rRUNvgz4e",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935055",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935055/13rRUwh80He",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2014/01/ttk2014010001",
"title": "Editorial [State of the Transactions]",
"doi": null,
"abstractUrl": "/journal/tk/2014/01/ttk2014010001/13rRUx0PqpT",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08315160",
"title": "Preface",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08315160/13rRUxNW1TW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/01/ttp2014010001",
"title": "Editorial: State of the Journal",
"doi": null,
"abstractUrl": "/journal/tp/2014/01/ttp2014010001/13rRUxYIMWo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2012/02/ttc2012020145",
"title": "State of the Journal",
"doi": null,
"abstractUrl": "/journal/tc/2012/02/ttc2012020145/13rRUxcbnBM",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/06/06805680",
"title": "Editor's Note [2013 Best Associate Editor Award & 2013 Best Reviewer Award]",
"doi": null,
"abstractUrl": "/journal/tg/2014/06/06805680/13rRUy3xY2Q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/02/08952831",
"title": "State of the Journal Editorial",
"doi": null,
"abstractUrl": "/journal/tp/2020/02/08952831/1gqpWPrYFsA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09663062",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09663062/1zBaC3IZK9y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "ttg2014010002",
"articleId": "13rRUwfZC0h",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwfZC0h",
"doi": "10.1109/TVCG.2014.3",
"abstract": "This special section presents expanded versions of three of the best papers from the 11th Annual ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA 2012), which was held in Lausanne, Switzerland, from 29-31 July 2012. SCA has established itself as the premier conference dedicated specifically to innovations in the software and technology of computer animation. SCA 2012 received 80 submissions and each submission was reviewed by at least three members of the international program committee. After a thorough online discussion, the 72-member international program committee decided on the 27 full papers and nine short presentation papers accepted for the final program. Out of 27 full papers, the symposium's Best Papers Award Committee selected one best paper, two runner-ups, and four honorable mentions. The selection was informed by the original reviews and the conference presentations. We are delighted to present three out of the six very best papers of SCA 2012 invited for this special section. Each of the invited papers contains a minimum of 30 percent new material and received at least three reviews, including one reviewer not among the original SCA reviewers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This special section presents expanded versions of three of the best papers from the 11th Annual ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA 2012), which was held in Lausanne, Switzerland, from 29-31 July 2012. SCA has established itself as the premier conference dedicated specifically to innovations in the software and technology of computer animation. SCA 2012 received 80 submissions and each submission was reviewed by at least three members of the international program committee. After a thorough online discussion, the 72-member international program committee decided on the 27 full papers and nine short presentation papers accepted for the final program. Out of 27 full papers, the symposium's Best Papers Award Committee selected one best paper, two runner-ups, and four honorable mentions. The selection was informed by the original reviews and the conference presentations. We are delighted to present three out of the six very best papers of SCA 2012 invited for this special section. Each of the invited papers contains a minimum of 30 percent new material and received at least three reviews, including one reviewer not among the original SCA reviewers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This special section presents expanded versions of three of the best papers from the 11th Annual ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA 2012), which was held in Lausanne, Switzerland, from 29-31 July 2012. SCA has established itself as the premier conference dedicated specifically to innovations in the software and technology of computer animation. SCA 2012 received 80 submissions and each submission was reviewed by at least three members of the international program committee. After a thorough online discussion, the 72-member international program committee decided on the 27 full papers and nine short presentation papers accepted for the final program. Out of 27 full papers, the symposium's Best Papers Award Committee selected one best paper, two runner-ups, and four honorable mentions. The selection was informed by the original reviews and the conference presentations. We are delighted to present three out of the six very best papers of SCA 2012 invited for this special section. Each of the invited papers contains a minimum of 30 percent new material and received at least three reviews, including one reviewer not among the original SCA reviewers.",
"title": "Guest Editor's Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)",
"normalizedTitle": "Guest Editor's Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)",
"fno": "ttg2014010002",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Computer Graphics",
"Visualization"
],
"authors": [
{
"givenName": "Paul G.",
"surname": "Kry",
"fullName": "Paul G. Kry",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jehee",
"surname": "Lee",
"fullName": "Jehee Lee",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "2-3",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tb/2013/05/ttb2013051089",
"title": "Guest Editorial for ACM BCB",
"doi": null,
"abstractUrl": "/journal/tb/2013/05/ttb2013051089/13rRUEgs2Af",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/03/07835799",
"title": "Guest Editor's Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)",
"doi": null,
"abstractUrl": "/journal/tg/2017/03/07835799/13rRUIIVlkm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2013/06/ttb2013061345",
"title": "Guest Editors' introduction to the special section on bioinformatics research and applications",
"doi": null,
"abstractUrl": "/journal/tb/2013/06/ttb2013061345/13rRUILLku4",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/06/ttg2013060898",
"title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium 2012",
"doi": null,
"abstractUrl": "/journal/tg/2013/06/ttg2013060898/13rRUNvgziD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/06/06807545",
"title": "Guest Editor's Introduction: Special Section on the International Symposium on Mixed and Augmented Reality 2012",
"doi": null,
"abstractUrl": "/journal/tg/2014/06/06807545/13rRUwhHcQT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2013/08/ttm2013081470",
"title": "Guest Editorial: Special Section on Outstanding Papers from MobiSys 2012",
"doi": null,
"abstractUrl": "/journal/tm/2013/08/ttm2013081470/13rRUxAATh6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050721",
"title": "Guest Editors' Introduction: Special Section on the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games (I3D 2012)",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050721/13rRUxBa5rV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/10/07230338",
"title": "Guest Editor's Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)",
"doi": null,
"abstractUrl": "/journal/tg/2015/10/07230338/13rRUxOve9L",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2014/01/ttm2014010003",
"title": "Guest Editorial: Special section on outstanding papers from MobiCom 2012",
"doi": null,
"abstractUrl": "/journal/tm/2014/01/ttm2014010003/13rRUxZ0o27",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/10/06881790",
"title": "Guest Editors' Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)",
"doi": null,
"abstractUrl": "/journal/tg/2014/10/06881790/13rRUy0HYRq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010001",
"articleId": "13rRUEgarsI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010004",
"articleId": "13rRUxcbnCr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxcbnCr",
"doi": "10.1109/TVCG.2013.97",
"abstract": "In this paper, we present a method for animating multiphase flow of immiscible fluids using unstructured moving meshes. Our underlying discretization is an unstructured tetrahedral mesh, the deformable simplicial complex (DSC), that moves with the flow in a Lagrangian manner. Mesh optimization operations improve element quality and avoid element inversion. In the context of multiphase flow, we guarantee that every element is occupied by a single fluid and, consequently, the interface between fluids is represented by a set of faces in the simplicial complex. This approach ensures that the underlying discretization matches the physics and avoids the additional book-keeping required in grid-based methods where multiple fluids may occupy the same cell. Our Lagrangian approach naturally leads us to adopt a finite element approach to simulation, in contrast to the finite volume approaches adopted by a majority of fluid simulation techniques that use tetrahedral meshes. We characterize fluid simulation as an optimization problem allowing for full coupling of the pressure and velocity fields and the incorporation of a second-order surface energy. We introduce a preconditioner based on the diagonal Schur complement and solve our optimization on the GPU. We provide the results of parameter studies as well as a performance analysis of our method, together with suggestions for performance optimization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present a method for animating multiphase flow of immiscible fluids using unstructured moving meshes. Our underlying discretization is an unstructured tetrahedral mesh, the deformable simplicial complex (DSC), that moves with the flow in a Lagrangian manner. Mesh optimization operations improve element quality and avoid element inversion. In the context of multiphase flow, we guarantee that every element is occupied by a single fluid and, consequently, the interface between fluids is represented by a set of faces in the simplicial complex. This approach ensures that the underlying discretization matches the physics and avoids the additional book-keeping required in grid-based methods where multiple fluids may occupy the same cell. Our Lagrangian approach naturally leads us to adopt a finite element approach to simulation, in contrast to the finite volume approaches adopted by a majority of fluid simulation techniques that use tetrahedral meshes. We characterize fluid simulation as an optimization problem allowing for full coupling of the pressure and velocity fields and the incorporation of a second-order surface energy. We introduce a preconditioner based on the diagonal Schur complement and solve our optimization on the GPU. We provide the results of parameter studies as well as a performance analysis of our method, together with suggestions for performance optimization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present a method for animating multiphase flow of immiscible fluids using unstructured moving meshes. Our underlying discretization is an unstructured tetrahedral mesh, the deformable simplicial complex (DSC), that moves with the flow in a Lagrangian manner. Mesh optimization operations improve element quality and avoid element inversion. In the context of multiphase flow, we guarantee that every element is occupied by a single fluid and, consequently, the interface between fluids is represented by a set of faces in the simplicial complex. This approach ensures that the underlying discretization matches the physics and avoids the additional book-keeping required in grid-based methods where multiple fluids may occupy the same cell. Our Lagrangian approach naturally leads us to adopt a finite element approach to simulation, in contrast to the finite volume approaches adopted by a majority of fluid simulation techniques that use tetrahedral meshes. We characterize fluid simulation as an optimization problem allowing for full coupling of the pressure and velocity fields and the incorporation of a second-order surface energy. We introduce a preconditioner based on the diagonal Schur complement and solve our optimization on the GPU. We provide the results of parameter studies as well as a performance analysis of our method, together with suggestions for performance optimization.",
"title": "Multiphase Flow of Immiscible Fluids on Unstructured Moving Meshes",
"normalizedTitle": "Multiphase Flow of Immiscible Fluids on Unstructured Moving Meshes",
"fno": "ttg2014010004",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Animation",
"Physics",
"Fluid Flow",
"Simulation",
"Optimization",
"Optimization Methods",
"Fluid Animation",
"Physics Based Modeling"
],
"authors": [
{
"givenName": "Marek Krzysztof",
"surname": "Misztal",
"fullName": "Marek Krzysztof Misztal",
"affiliation": "Niels Bohr Inst., Univ. of Copenhagen, Copenhagen, Denmark",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kenny",
"surname": "Erleben",
"fullName": "Kenny Erleben",
"affiliation": "Niels Bohr Inst., Univ. of Copenhagen, Copenhagen, Denmark",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Adam",
"surname": "Bargteil",
"fullName": "Adam Bargteil",
"affiliation": "Datalogisk Inst., Univ. of Copenhagen, København, Denmark",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jens",
"surname": "Fursund",
"fullName": "Jens Fursund",
"affiliation": "Sch. of Comput., Univ. of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Brian Bunch",
"surname": "Christensen",
"fullName": "Brian Bunch Christensen",
"affiliation": "Alexandra Instituttet, Aarhus, Denmark",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jakob",
"surname": "Andreas Baerentzen",
"fullName": "Jakob Andreas Baerentzen",
"affiliation": "Tech. Univ. of Denmark, Lyngby, Denmark",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert",
"surname": "Bridson",
"fullName": "Robert Bridson",
"affiliation": "Dept. of Comput. Sci., UBC, Vancouver, BC, Canada",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "4-16",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/aihas/1994/6440/0/00390502",
"title": "Fluids in a distributed interactive simulation",
"doi": null,
"abstractUrl": "/proceedings-article/aihas/1994/00390502/12OmNC4wtKv",
"parentPublication": {
"id": "proceedings/aihas/1994/6440/0",
"title": "Fifth Annual Conference on AI, and Planning in High Autonomy Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a118",
"title": "Anisotropic Surface Reconstruction for Multiphase Fluids",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a118/12OmNCmpcVe",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccit/2009/3896/0/3896b280",
"title": "Performance-Oriented Drilling Fluids Design System with a Neural Network Approach",
"doi": null,
"abstractUrl": "/proceedings-article/iccit/2009/3896b280/12OmNvq5jFi",
"parentPublication": {
"id": "proceedings/iccit/2009/3896/0",
"title": "Convergence Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2017/3588/0/3588a309",
"title": "Screen Space Rendering Solution for Multiphase SPH Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2017/3588a309/12OmNvs4vmP",
"parentPublication": {
"id": "proceedings/svr/2017/3588/0",
"title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1995/7062/0/70620198",
"title": "Dynamic simulation of splashing fluids",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1995/70620198/12OmNy1SFHC",
"parentPublication": {
"id": "proceedings/ca/1995/7062/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvgip/2008/3476/0/3476a063",
"title": "Explosion Simulation Using Compressible Fluids",
"doi": null,
"abstractUrl": "/proceedings-article/icvgip/2008/3476a063/12OmNz5JBRO",
"parentPublication": {
"id": "proceedings/icvgip/2008/3476/0",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/05/v0939",
"title": "Texturing Fluids",
"doi": null,
"abstractUrl": "/journal/tg/2007/05/v0939/13rRUwbs20O",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010070",
"title": "Fluid Simulation with Articulated Bodies",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010070/13rRUxDqS8f",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/11/ttg2011111714",
"title": "Six Degrees-of-Freedom Haptic Interaction with Fluids",
"doi": null,
"abstractUrl": "/journal/tg/2011/11/ttg2011111714/13rRUxNW1Zj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1997/03/mcg1997030052",
"title": "Real-Time Fluid Simulation in a Dynamic Virtual Environment",
"doi": null,
"abstractUrl": "/magazine/cg/1997/03/mcg1997030052/13rRUyXKxU3",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010002",
"articleId": "13rRUwfZC0h",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010017",
"articleId": "13rRUB7a1fQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRXr",
"name": "ttg2014010004s.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010004s.zip",
"extension": "zip",
"size": "1.57 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUB7a1fQ",
"doi": "10.1109/TVCG.2013.19",
"abstract": "We present a GPU friendly, Eulerian, free surface fluid simulation method that conserves mass locally and globally without the use of Lagrangian components. Local mass conservation prevents small-scale details of the free surface from disappearing, a problem that plagues many previous approaches, while global mass conservation ensures that the total volume of the liquid does not decrease over time. Our method handles moving solid boundaries as well as cells that are partially filled with solids. Due to its stability, it allows the use of large time steps that makes it suitable for both offline and real-time applications. We achieve this by using density-based surface tracking with a novel, unconditionally stable, conservative advection scheme. We also propose mass conserving methods to sharpen the interface and to reveal subgrid features of the liquid. While our approach conserves mass, volume loss is still possible but only temporarily. With constant mass, local volume loss causes a local increase of the density used for surface tracking which we detect and correct over time. We show the effectiveness of the proposed methods in several practical examples all running either at interactive rates or in real time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a GPU friendly, Eulerian, free surface fluid simulation method that conserves mass locally and globally without the use of Lagrangian components. Local mass conservation prevents small-scale details of the free surface from disappearing, a problem that plagues many previous approaches, while global mass conservation ensures that the total volume of the liquid does not decrease over time. Our method handles moving solid boundaries as well as cells that are partially filled with solids. Due to its stability, it allows the use of large time steps that makes it suitable for both offline and real-time applications. We achieve this by using density-based surface tracking with a novel, unconditionally stable, conservative advection scheme. We also propose mass conserving methods to sharpen the interface and to reveal subgrid features of the liquid. While our approach conserves mass, volume loss is still possible but only temporarily. With constant mass, local volume loss causes a local increase of the density used for surface tracking which we detect and correct over time. We show the effectiveness of the proposed methods in several practical examples all running either at interactive rates or in real time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a GPU friendly, Eulerian, free surface fluid simulation method that conserves mass locally and globally without the use of Lagrangian components. Local mass conservation prevents small-scale details of the free surface from disappearing, a problem that plagues many previous approaches, while global mass conservation ensures that the total volume of the liquid does not decrease over time. Our method handles moving solid boundaries as well as cells that are partially filled with solids. Due to its stability, it allows the use of large time steps that makes it suitable for both offline and real-time applications. We achieve this by using density-based surface tracking with a novel, unconditionally stable, conservative advection scheme. We also propose mass conserving methods to sharpen the interface and to reveal subgrid features of the liquid. While our approach conserves mass, volume loss is still possible but only temporarily. With constant mass, local volume loss causes a local increase of the density used for surface tracking which we detect and correct over time. We show the effectiveness of the proposed methods in several practical examples all running either at interactive rates or in real time.",
"title": "Mass-Conserving Eulerian Liquid Simulation",
"normalizedTitle": "Mass-Conserving Eulerian Liquid Simulation",
"fno": "ttg2014010017",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Liquids",
"Level Set",
"Interpolation",
"Computational Modeling",
"Solids",
"Mathematical Model",
"Graphics Processing Units",
"Physics Based Animation",
"Mass Conservation",
"Density Sharpening",
"Fluid Simulation"
],
"authors": [
{
"givenName": "Nuttapong",
"surname": "Chentanez",
"fullName": "Nuttapong Chentanez",
"affiliation": "Nvidia PhysX Res., Zurich, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Matthias",
"surname": "Muller",
"fullName": "Matthias Muller",
"affiliation": "Nvidia PhysX Res., Zurich, Switzerland",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "17-29",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032b889",
"title": "See the Glass Half Full: Reasoning About Liquid Containers, Their Volume and Content",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b889/12OmNxy4MYd",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdl/2002/7350/0/01022745",
"title": "Surface conductivity in liquid-solid interface due to image force",
"doi": null,
"abstractUrl": "/proceedings-article/icdl/2002/01022745/12OmNxzMnWM",
"parentPublication": {
"id": "proceedings/icdl/2002/7350/0",
"title": "Proceedings of 14th International Conference on Dielectric Liquids",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999893",
"title": "Ga Liquid Metal Embrittlement for Fine Pitch Interconnect Rework",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999893/12OmNy3AgwI",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2016/0811/0/0811a012",
"title": "Surface Tension and Wettability Modeling for Flowing Liquids",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2016/0811a012/12OmNyuya8j",
"parentPublication": {
"id": "proceedings/cgiv/2016/0811/0",
"title": "2016 13th International Conference on Computer Graphics, Imaging and Visualization (CGiV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsc/2016/1192/0/1192a489",
"title": "A Hybrid Modeling Method for Dynamic Liquid Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2016/1192a489/12OmNzBOhHv",
"parentPublication": {
"id": "proceedings/dsc/2016/1192/0",
"title": "2016 IEEE First International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2009/3781/0/3781a271",
"title": "Delaunay Simplexes in Liquid Cyclohexane",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2009/3781a271/12OmNzXnNCd",
"parentPublication": {
"id": "proceedings/isvd/2009/3781/0",
"title": "2009 Sixth International Symposium on Voronoi Diagrams",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/10/06747389",
"title": "Large-Scale Liquid Simulation on Adaptive Hexahedral Grids",
"doi": null,
"abstractUrl": "/journal/tg/2014/10/06747389/13rRUxYrbMj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/10/07132780",
"title": "Coupling 3D Eulerian, Heightfield and Particle Methods for Interactive Simulation of Large Scale Liquid Phenomena",
"doi": null,
"abstractUrl": "/journal/tg/2015/10/07132780/13rRUxlgy3K",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/04/08869736",
"title": "Implicit Density Projection for Volume Conserving Liquids",
"doi": null,
"abstractUrl": "/journal/tg/2021/04/08869736/1e9h4gzgnmw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2019/2297/0/229700a163",
"title": "Simulation Controlling Method for Generating Desired Water Caustics",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2019/229700a163/1fHkljLfIVG",
"parentPublication": {
"id": "proceedings/cw/2019/2297/0",
"title": "2019 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010004",
"articleId": "13rRUxcbnCr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010030",
"articleId": "13rRUxNW1TU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgBW",
"name": "ttg2014010017s2.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010017s2.avi",
"extension": "avi",
"size": "7.7 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXFgBV",
"name": "ttg2014010017s1.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010017s1.avi",
"extension": "avi",
"size": "25.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxNW1TU",
"doi": "10.1109/TVCG.2013.98",
"abstract": "In complex scenes with many objects, collision detection plays a key role in the simulation performance. This is particularly true in fracture simulation for two main reasons. One is that fracture fragments tend to exhibit very intensive contact, and the other is that collision detection data structures for new fragments need to be computed on the fly. In this paper, we present novel collision detection algorithms and data structures for real-time simulation of fracturing rigid bodies. We build on a combination of well-known efficient data structures, namely, distance fields and sphere trees, making our algorithm easy to integrate on existing simulation engines. We propose novel methods to construct these data structures, such that they can be efficiently updated upon fracture events and integrated in a simple yet effective self-adapting contact selection algorithm. Altogether, we drastically reduce the cost of both collision detection and collision response. We have evaluated our global solution for collision detection on challenging scenarios, achieving high frame rates suited for hard real-time applications such as video games or haptics. Our solution opens promising perspectives for complex fracture simulations involving many dynamically created rigid objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In complex scenes with many objects, collision detection plays a key role in the simulation performance. This is particularly true in fracture simulation for two main reasons. One is that fracture fragments tend to exhibit very intensive contact, and the other is that collision detection data structures for new fragments need to be computed on the fly. In this paper, we present novel collision detection algorithms and data structures for real-time simulation of fracturing rigid bodies. We build on a combination of well-known efficient data structures, namely, distance fields and sphere trees, making our algorithm easy to integrate on existing simulation engines. We propose novel methods to construct these data structures, such that they can be efficiently updated upon fracture events and integrated in a simple yet effective self-adapting contact selection algorithm. Altogether, we drastically reduce the cost of both collision detection and collision response. We have evaluated our global solution for collision detection on challenging scenarios, achieving high frame rates suited for hard real-time applications such as video games or haptics. Our solution opens promising perspectives for complex fracture simulations involving many dynamically created rigid objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In complex scenes with many objects, collision detection plays a key role in the simulation performance. This is particularly true in fracture simulation for two main reasons. One is that fracture fragments tend to exhibit very intensive contact, and the other is that collision detection data structures for new fragments need to be computed on the fly. In this paper, we present novel collision detection algorithms and data structures for real-time simulation of fracturing rigid bodies. We build on a combination of well-known efficient data structures, namely, distance fields and sphere trees, making our algorithm easy to integrate on existing simulation engines. We propose novel methods to construct these data structures, such that they can be efficiently updated upon fracture events and integrated in a simple yet effective self-adapting contact selection algorithm. Altogether, we drastically reduce the cost of both collision detection and collision response. We have evaluated our global solution for collision detection on challenging scenarios, achieving high frame rates suited for hard real-time applications such as video games or haptics. Our solution opens promising perspectives for complex fracture simulations involving many dynamically created rigid objects.",
"title": "Fast Collision Detection for Fracturing Rigid Bodies",
"normalizedTitle": "Fast Collision Detection for Fracturing Rigid Bodies",
"fno": "ttg2014010030",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computational Modeling",
"Data Structures",
"Surface Cracks",
"Approximation Methods",
"Detection Algorithms",
"Data Models",
"Approximation Algorithms",
"Rigid Body",
"Physical Simulation",
"Collision Detection",
"Fracture"
],
"authors": [
{
"givenName": "Loeiz",
"surname": "Glondu",
"fullName": "Loeiz Glondu",
"affiliation": "IRISA, Inria, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sara C.",
"surname": "Schvartzman",
"fullName": "Sara C. Schvartzman",
"affiliation": "Univ. Rey Juan Carlos, Mostoles, Spain",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maud",
"surname": "Marchal",
"fullName": "Maud Marchal",
"affiliation": "IRISA, Inria, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Georges",
"surname": "Dumont",
"fullName": "Georges Dumont",
"affiliation": "IRISA, Inria, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Miguel A.",
"surname": "Otaduy",
"fullName": "Miguel A. Otaduy",
"affiliation": "Univ. Rey Juan Carlos, Mostoles, Spain",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "30-41",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391c138",
"title": "3D Fragment Reassembly Using Integrated Template Guidance and Fracture-Region Matching",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391c138/12OmNCcKQCl",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccrd/2010/4043/0/4043a007",
"title": "Fast Traversal Algorithm for Detecting Object Interference Using Hierarchical Representation between Rigid Bodies",
"doi": null,
"abstractUrl": "/proceedings-article/iccrd/2010/4043a007/12OmNvEhfYT",
"parentPublication": {
"id": "proceedings/iccrd/2010/4043/0",
"title": "Computer Research and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2017/5332/0/5332a057",
"title": "A Study of Assembly Navigation Operation with 2-D Panel for Restoring Fractured Objects",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2017/5332a057/12OmNwCJOQ1",
"parentPublication": {
"id": "proceedings/nicoint/2017/5332/0",
"title": "2017 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2014/2696/0/06963004",
"title": "Virtual merging of fractured fragments based on constraint cluster",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2014/06963004/12OmNwErpBv",
"parentPublication": {
"id": "proceedings/icccnt/2014/2696/0",
"title": "2014 5th International Conference on Computing, Communication and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034c934",
"title": "Geometry Based Faceting of 3D Digitized Archaeological Fragments",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c934/12OmNwK7obr",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2007/0905/0/04161009",
"title": "Balanced Hierarchies for Collision Detection between Fracturing Objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161009/12OmNyQYttV",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457502",
"title": "Virtual 3D bone fracture reconstruction via inter-fragmentary surface alignment",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457502/12OmNzvQHMO",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/02/v0370",
"title": "Fracturing Rigid Materials",
"doi": null,
"abstractUrl": "/journal/tg/2007/02/v0370/13rRUwfZC07",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/03/v0365",
"title": "Dynamic Simulation of Articulated Rigid Bodies with Contact and Collision",
"doi": null,
"abstractUrl": "/journal/tg/2006/03/v0365/13rRUx0xPTI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/06/ttg2013060991",
"title": "Interpenetration Free Simulation of Thin Shell Rigid Bodies",
"doi": null,
"abstractUrl": "/journal/tg/2013/06/ttg2013060991/13rRUygT7yb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010017",
"articleId": "13rRUB7a1fQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010042",
"articleId": "13rRUwjGoG2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRN3",
"name": "ttg2014010030s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010030s.mp4",
"extension": "mp4",
"size": "22.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwjGoG2",
"doi": "10.1109/TVCG.2013.100",
"abstract": "Striking a careful balance among coverage, occlusion, and complexity is a resounding theme in the visual understanding of large and complex three-dimensional flow fields. In this paper, we present a novel deformation framework for focus+context streamline visualization that reduces occlusion and clutter around the focal regions while compacting the context region in a full view. Unlike existing techniques that vary streamline densities, we advocate a different approach that manipulates streamline positions. This is achieved by partitioning the flow field's volume space into blocks and deforming the blocks to guide streamline repositioning. We formulate block expansion and block smoothing into energy terms and solve for a deformed grid that minimizes the objective function under the volume boundary and edge flipping constraints. Leveraging a GPU linear system solver, we demonstrate interactive focus+context visualization with 3D flow field data of various characteristics. Compared to the fisheye focus+context technique, our method can magnify multiple streamlines of focus in different regions simultaneously while minimizing the distortion through optimized deformation. Both automatic and manual feature specifications are provided for flexible focus selection and effective visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Striking a careful balance among coverage, occlusion, and complexity is a resounding theme in the visual understanding of large and complex three-dimensional flow fields. In this paper, we present a novel deformation framework for focus+context streamline visualization that reduces occlusion and clutter around the focal regions while compacting the context region in a full view. Unlike existing techniques that vary streamline densities, we advocate a different approach that manipulates streamline positions. This is achieved by partitioning the flow field's volume space into blocks and deforming the blocks to guide streamline repositioning. We formulate block expansion and block smoothing into energy terms and solve for a deformed grid that minimizes the objective function under the volume boundary and edge flipping constraints. Leveraging a GPU linear system solver, we demonstrate interactive focus+context visualization with 3D flow field data of various characteristics. Compared to the fisheye focus+context technique, our method can magnify multiple streamlines of focus in different regions simultaneously while minimizing the distortion through optimized deformation. Both automatic and manual feature specifications are provided for flexible focus selection and effective visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Striking a careful balance among coverage, occlusion, and complexity is a resounding theme in the visual understanding of large and complex three-dimensional flow fields. In this paper, we present a novel deformation framework for focus+context streamline visualization that reduces occlusion and clutter around the focal regions while compacting the context region in a full view. Unlike existing techniques that vary streamline densities, we advocate a different approach that manipulates streamline positions. This is achieved by partitioning the flow field's volume space into blocks and deforming the blocks to guide streamline repositioning. We formulate block expansion and block smoothing into energy terms and solve for a deformed grid that minimizes the objective function under the volume boundary and edge flipping constraints. Leveraging a GPU linear system solver, we demonstrate interactive focus+context visualization with 3D flow field data of various characteristics. Compared to the fisheye focus+context technique, our method can magnify multiple streamlines of focus in different regions simultaneously while minimizing the distortion through optimized deformation. Both automatic and manual feature specifications are provided for flexible focus selection and effective visualization.",
"title": "A Deformation Framework for Focus+Context Flow Visualization",
"normalizedTitle": "A Deformation Framework for Focus+Context Flow Visualization",
"fno": "ttg2014010042",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Entropy",
"Visualization",
"Smoothing Methods",
"Data Visualization",
"Context",
"Manuals",
"Shape",
"Optimized Deformation",
"Flow Visualization",
"Focus Context Visualization"
],
"authors": [
{
"givenName": null,
"surname": "Jun Tao",
"fullName": "Jun Tao",
"affiliation": "Dept. of Comput. Sci., Michigan Technol. Univ., Houghton, MI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Chaoli Wang",
"fullName": "Chaoli Wang",
"affiliation": "Dept. of Comput. Sci., Michigan Technol. Univ., Houghton, MI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Ching-Kuang Shene",
"fullName": "Ching-Kuang Shene",
"affiliation": "Dept. of Comput. Sci., Michigan Technol. Univ., Houghton, MI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Seung Hyun Kim",
"fullName": "Seung Hyun Kim",
"affiliation": "Dept. of Mech. Eng.-Eng. Mech., Michigan Technol. Univ., Townsend, MA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "42-55",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-infovis/2000/0804/0/08040085",
"title": "Redefining the Focus and Context of Focus+Context Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2000/08040085/12OmNB0nW9E",
"parentPublication": {
"id": "proceedings/ieee-infovis/2000/0804/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/1999/0431/0/04310053",
"title": "A Framework for Focus+Context Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/1999/04310053/12OmNBhpS0L",
"parentPublication": {
"id": "proceedings/ieee-infovis/1999/0431/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2014/4677/0/4677a145",
"title": "Multilevel Focus+Context Visualization Using Balanced Multiresolution",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2014/4677a145/12OmNwGZNGM",
"parentPublication": {
"id": "proceedings/cw/2014/4677/0",
"title": "2014 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2008/3382/1/3382a368",
"title": "Intelligent Focus+Context Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2008/3382a368/12OmNy5R3sk",
"parentPublication": {
"id": "proceedings/isda/2008/3382/1",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a389",
"title": "2.5D Focus+Context Map Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a389/12OmNyKrHen",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156349",
"title": "Interactive streamline exploration and manipulation using deformation",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156349/12OmNznkJUv",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/07/07332955",
"title": "View-Dependent Streamline Deformation and Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2016/07/07332955/13rRUB7a1fV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/02/ttg2011020171",
"title": "Feature-Preserving Volume Data Reduction and Focus+Context Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2011/02/ttg2011020171/13rRUx0xPTN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v0893",
"title": "Outlier-Preserving Focus+Context Visualization in Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v0893/13rRUx0xPmS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/05/07120994",
"title": "Multiperspective Focus+Context Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2016/05/07120994/13rRUyft7D5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010030",
"articleId": "13rRUxNW1TU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010056",
"articleId": "13rRUx0Pqpy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXnFu8",
"name": "ttg2014010042s.wmv",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010042s.wmv",
"extension": "wmv",
"size": "32.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0Pqpy",
"doi": "10.1109/TVCG.2013.104",
"abstract": "Area-proportional Euler diagrams representing three sets are commonly used to visualize the results of medical experiments, business data, and information from other applications where statistical results are best shown using interlinking curves. Currently, there is no tool that will reliably visualize exact area-proportional diagrams for up to three sets. Limited success, in terms of diagram accuracy, has been achieved for a small number of cases, such as Venn-2 and Venn-3 where all intersections between the sets must be represented. Euler diagrams do not have to include all intersections and so permit the visualization of cases where some intersections have a zero value. This paper describes a general, implemented, method for visualizing all 40 Euler-3 diagrams in an area-proportional manner. We provide techniques for generating the curves with circles and convex polygons, analyze the drawability of data with these shapes, and give a mechanism for deciding whether such data can be drawn with circles. For the cases where non-convex curves are necessary, our method draws an appropriate diagram using non-convex polygons. Thus, we are now always able to automatically visualize data for up to three sets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Area-proportional Euler diagrams representing three sets are commonly used to visualize the results of medical experiments, business data, and information from other applications where statistical results are best shown using interlinking curves. Currently, there is no tool that will reliably visualize exact area-proportional diagrams for up to three sets. Limited success, in terms of diagram accuracy, has been achieved for a small number of cases, such as Venn-2 and Venn-3 where all intersections between the sets must be represented. Euler diagrams do not have to include all intersections and so permit the visualization of cases where some intersections have a zero value. This paper describes a general, implemented, method for visualizing all 40 Euler-3 diagrams in an area-proportional manner. We provide techniques for generating the curves with circles and convex polygons, analyze the drawability of data with these shapes, and give a mechanism for deciding whether such data can be drawn with circles. For the cases where non-convex curves are necessary, our method draws an appropriate diagram using non-convex polygons. Thus, we are now always able to automatically visualize data for up to three sets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Area-proportional Euler diagrams representing three sets are commonly used to visualize the results of medical experiments, business data, and information from other applications where statistical results are best shown using interlinking curves. Currently, there is no tool that will reliably visualize exact area-proportional diagrams for up to three sets. Limited success, in terms of diagram accuracy, has been achieved for a small number of cases, such as Venn-2 and Venn-3 where all intersections between the sets must be represented. Euler diagrams do not have to include all intersections and so permit the visualization of cases where some intersections have a zero value. This paper describes a general, implemented, method for visualizing all 40 Euler-3 diagrams in an area-proportional manner. We provide techniques for generating the curves with circles and convex polygons, analyze the drawability of data with these shapes, and give a mechanism for deciding whether such data can be drawn with circles. For the cases where non-convex curves are necessary, our method draws an appropriate diagram using non-convex polygons. Thus, we are now always able to automatically visualize data for up to three sets.",
"title": "Drawing Area-Proportional Euler Diagrams Representing Up To Three Sets",
"normalizedTitle": "Drawing Area-Proportional Euler Diagrams Representing Up To Three Sets",
"fno": "ttg2014010056",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Decision Support Systems",
"Concurrent Computing",
"Abstracts",
"Business",
"Reliability",
"Accuracy",
"Venn Diagrams",
"Data Visualization",
"Decision Support Systems",
"Concurrent Computing",
"Abstracts",
"Business",
"Reliability",
"Accuracy",
"Information Visualization",
"Euler Diagrams"
],
"authors": [
{
"givenName": "Peter",
"surname": "Rodgers",
"fullName": "Peter Rodgers",
"affiliation": "Sch. of Comput., Univ. of Kent, Canterbury, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gem",
"surname": "Stapleton",
"fullName": "Gem Stapleton",
"affiliation": "Sch. of Comput., Univ. of Brighton, Brighton, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jean",
"surname": "Flower",
"fullName": "Jean Flower",
"affiliation": "Sch. of Comput., Univ. of Brighton, Brighton, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "John",
"surname": "Howse",
"fullName": "John Howse",
"affiliation": "Sch. of Comput., Univ. of Brighton, Brighton, UK",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vlhcc/2004/8696/0/86960147",
"title": "Dynamic Euler Diagram Drawing",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2004/86960147/12OmNAgoV6r",
"parentPublication": {
"id": "proceedings/vlhcc/2004/8696/0",
"title": "Proceedings. 2004 IEEE Symposium on Visual Languages and Human Centric Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/2000/0840/0/08400119",
"title": "Projections in Venn-Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/vl/2000/08400119/12OmNB8Cj3l",
"parentPublication": {
"id": "proceedings/vl/2000/0840/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/1999/0216/0/02160138",
"title": "Reasoning with Spider Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1999/02160138/12OmNBQkwZS",
"parentPublication": {
"id": "proceedings/vl/1999/0216/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2014/4035/0/06883063",
"title": "Properties of euler diagrams and graphs in combination",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2014/06883063/12OmNvA1hE8",
"parentPublication": {
"id": "proceedings/vlhcc/2014/4035/0",
"title": "2014 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2011/1246/0/06070401",
"title": "Drawing Euler diagrams with circles and ellipses",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2011/06070401/12OmNvpew49",
"parentPublication": {
"id": "proceedings/vlhcc/2011/1246/0",
"title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2017/3870/0/3870a243",
"title": "Automatic Assessment of Student Answers Consisting of Venn and Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2017/3870a243/12OmNwF0BKj",
"parentPublication": {
"id": "proceedings/icalt/2017/3870/0",
"title": "2017 IEEE 17th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a585",
"title": "Embedding Wellformed Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a585/12OmNyuya3M",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011071020",
"title": "Drawing Euler Diagrams with Circles: The Theory of Piercings",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011071020/13rRUEgarBq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061090",
"title": "Untangling Euler Diagrams",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061090/13rRUILtJm3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/01/ttg2011010088",
"title": "Inductively Generating Euler Diagrams",
"doi": null,
"abstractUrl": "/journal/tg/2011/01/ttg2011010088/13rRUNvgziB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010042",
"articleId": "13rRUwjGoG2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010070",
"articleId": "13rRUEgs2M1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgQk",
"name": "ttg2014010056s.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010056s.pdf",
"extension": "pdf",
"size": "424 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUEgs2M1",
"doi": "10.1109/TVCG.2013.96",
"abstract": "This paper describes a new volume rendering system for spectral/hp finite-element methods that has as its goal to be both accurate and interactive. Even though high-order finite element methods are commonly used by scientists and engineers, there are few visualization methods designed to display this data directly. Consequently, visualizations of high-order data are generally created by first sampling the high-order field onto a regular grid and then generating the visualization via traditional methods based on linear interpolation. This approach, however, introduces error into the visualization pipeline and requires the user to balance image quality, interactivity, and resource consumption. We first show that evaluation of the volume rendering integral, when applied to the composition of piecewise-smooth transfer functions with the high-order scalar field, typically exhibits second-order convergence for a wide range of high-order quadrature schemes, and has worst case first-order convergence. This result provides bounds on the ability to achieve high-order convergence to the volume rendering integral. We then develop an algorithm for optimized evaluation of the volume rendering integral, based on the categorization of each ray according to the local behavior of the field and transfer function. We demonstrate the effectiveness of our system by running performance benchmarks on several high-order fluid-flow simulations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper describes a new volume rendering system for spectral/hp finite-element methods that has as its goal to be both accurate and interactive. Even though high-order finite element methods are commonly used by scientists and engineers, there are few visualization methods designed to display this data directly. Consequently, visualizations of high-order data are generally created by first sampling the high-order field onto a regular grid and then generating the visualization via traditional methods based on linear interpolation. This approach, however, introduces error into the visualization pipeline and requires the user to balance image quality, interactivity, and resource consumption. We first show that evaluation of the volume rendering integral, when applied to the composition of piecewise-smooth transfer functions with the high-order scalar field, typically exhibits second-order convergence for a wide range of high-order quadrature schemes, and has worst case first-order convergence. This result provides bounds on the ability to achieve high-order convergence to the volume rendering integral. We then develop an algorithm for optimized evaluation of the volume rendering integral, based on the categorization of each ray according to the local behavior of the field and transfer function. We demonstrate the effectiveness of our system by running performance benchmarks on several high-order fluid-flow simulations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper describes a new volume rendering system for spectral/hp finite-element methods that has as its goal to be both accurate and interactive. Even though high-order finite element methods are commonly used by scientists and engineers, there are few visualization methods designed to display this data directly. Consequently, visualizations of high-order data are generally created by first sampling the high-order field onto a regular grid and then generating the visualization via traditional methods based on linear interpolation. This approach, however, introduces error into the visualization pipeline and requires the user to balance image quality, interactivity, and resource consumption. We first show that evaluation of the volume rendering integral, when applied to the composition of piecewise-smooth transfer functions with the high-order scalar field, typically exhibits second-order convergence for a wide range of high-order quadrature schemes, and has worst case first-order convergence. This result provides bounds on the ability to achieve high-order convergence to the volume rendering integral. We then develop an algorithm for optimized evaluation of the volume rendering integral, based on the categorization of each ray according to the local behavior of the field and transfer function. We demonstrate the effectiveness of our system by running performance benchmarks on several high-order fluid-flow simulations.",
"title": "GPU-Based Volume Visualization from High-Order Finite Element Fields",
"normalizedTitle": "GPU-Based Volume Visualization from High-Order Finite Element Fields",
"fno": "ttg2014010070",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Volume Measurement",
"Rendering Computer Graphics",
"Spectral Analysis",
"Finite Element Analysis",
"Graphics Processing Units",
"GPU Ray Tracing",
"Volume Visualization",
"High Order Finite Element Methods",
"Spectral Hp Elements"
],
"authors": [
{
"givenName": "Blake",
"surname": "Nelson",
"fullName": "Blake Nelson",
"affiliation": "Space Dynamics Lab., Utah State Univ. Res. Found., Logan, UT, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert M.",
"surname": "Kirby",
"fullName": "Robert M. Kirby",
"affiliation": "Sch. of Comput., Univ. of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert",
"surname": "Haimes",
"fullName": "Robert Haimes",
"affiliation": "Dept. of Aeronaut. & Astronaut., MIT, Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "70-83",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2003/2030/0/20300038",
"title": "Acceleration Techniques for GPU-based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300038/12OmNC2xhD8",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcse/2010/4303/2/4303b325",
"title": "A Combined Finite-Element and Finite-Volume Method in Reservoir Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/wcse/2010/4303b325/12OmNCb3ftt",
"parentPublication": {
"id": "proceedings/wcse/2010/4303/2",
"title": "2010 Second World Congress on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcse/2013/2882/0/06754283",
"title": "A Flexible Transfer Function Model for the Volume Rendering of Finite Element Data",
"doi": null,
"abstractUrl": "/proceedings-article/wcse/2013/06754283/12OmNx3q6V3",
"parentPublication": {
"id": "proceedings/wcse/2013/2882/0",
"title": "2013 Fourth World Congress on Software Engineering (WCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2008/3382/1/3382a368",
"title": "Intelligent Focus+Context Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2008/3382a368/12OmNy5R3sk",
"parentPublication": {
"id": "proceedings/isda/2008/3382/1",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532808",
"title": "Scale-invariant volume rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532808/12OmNyoAA5X",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1997/8262/0/82620213",
"title": "Efficient subdivision of finite-element datasets into consistent tetrahedra",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620213/12OmNzG4gA4",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010140",
"title": "Verifying Volume Rendering Using Discretization Error Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010140/13rRUwInvB6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011121795",
"title": "Extinction-Based Shading and Illumination in GPU Volume Ray-Casting",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011121795/13rRUwkxc5o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122325",
"title": "ElVis: A System for the Accurate and Interactive Visualization of High-Order Finite Element Solutions",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122325/13rRUxD9h56",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/03/v0253",
"title": "Volume Illustration: Nonphotorealistic Rendering of Volume Models",
"doi": null,
"abstractUrl": "/journal/tg/2001/03/v0253/13rRUxbTMyH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010056",
"articleId": "13rRUx0Pqpy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010084",
"articleId": "13rRUxBa562",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa562",
"doi": "10.1109/TVCG.2013.81",
"abstract": "We present Grouper: an all-in-one compact file format, random-access data structure, and streamable representation for large triangle meshes. Similarly to the recently published SQuad representation, Grouper represents the geometry and connectivity of a mesh by grouping vertices and triangles into fixed-size records, most of which store two adjacent triangles and a shared vertex. Unlike SQuad, however, Grouper interleaves geometry with connectivity and uses a new connectivity representation to ensure that vertices and triangles can be stored in a coherent order that enables memory-efficient sequential stream processing. We present a linear-time construction algorithm that allows streaming out Grouper meshes using a small memory footprint while preserving the initial ordering of vertices. As a part of this construction, we show how the problem of assigning vertices and triangles to groups reduces to a well-known NP-hard optimization problem, and present a simple yet effective heuristic solution that performs well in practice. Our array-based Grouper representation also doubles as a triangle mesh data structure that allows direct access to vertices and triangles. Storing only about two integer references per triangleâi.e., less than the three vertex references stored with each triangle in a conventional indexed mesh format-Grouper answers both incidence and adjacency queries in amortized constant time. Our compact representation enables data-parallel processing on multicore computers, instant partitioning and fast transmission for distributed processing, as well as efficient out-of-core access. We demonstrate the versatility and performance benefits of Grouper using a suite of example meshes and processing kernels.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present Grouper: an all-in-one compact file format, random-access data structure, and streamable representation for large triangle meshes. Similarly to the recently published SQuad representation, Grouper represents the geometry and connectivity of a mesh by grouping vertices and triangles into fixed-size records, most of which store two adjacent triangles and a shared vertex. Unlike SQuad, however, Grouper interleaves geometry with connectivity and uses a new connectivity representation to ensure that vertices and triangles can be stored in a coherent order that enables memory-efficient sequential stream processing. We present a linear-time construction algorithm that allows streaming out Grouper meshes using a small memory footprint while preserving the initial ordering of vertices. As a part of this construction, we show how the problem of assigning vertices and triangles to groups reduces to a well-known NP-hard optimization problem, and present a simple yet effective heuristic solution that performs well in practice. Our array-based Grouper representation also doubles as a triangle mesh data structure that allows direct access to vertices and triangles. Storing only about two integer references per triangleâi.e., less than the three vertex references stored with each triangle in a conventional indexed mesh format-Grouper answers both incidence and adjacency queries in amortized constant time. Our compact representation enables data-parallel processing on multicore computers, instant partitioning and fast transmission for distributed processing, as well as efficient out-of-core access. We demonstrate the versatility and performance benefits of Grouper using a suite of example meshes and processing kernels.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present Grouper: an all-in-one compact file format, random-access data structure, and streamable representation for large triangle meshes. Similarly to the recently published SQuad representation, Grouper represents the geometry and connectivity of a mesh by grouping vertices and triangles into fixed-size records, most of which store two adjacent triangles and a shared vertex. Unlike SQuad, however, Grouper interleaves geometry with connectivity and uses a new connectivity representation to ensure that vertices and triangles can be stored in a coherent order that enables memory-efficient sequential stream processing. We present a linear-time construction algorithm that allows streaming out Grouper meshes using a small memory footprint while preserving the initial ordering of vertices. As a part of this construction, we show how the problem of assigning vertices and triangles to groups reduces to a well-known NP-hard optimization problem, and present a simple yet effective heuristic solution that performs well in practice. Our array-based Grouper representation also doubles as a triangle mesh data structure that allows direct access to vertices and triangles. Storing only about two integer references per triangleâi.e., less than the three vertex references stored with each triangle in a conventional indexed mesh format-Grouper answers both incidence and adjacency queries in amortized constant time. Our compact representation enables data-parallel processing on multicore computers, instant partitioning and fast transmission for distributed processing, as well as efficient out-of-core access. We demonstrate the versatility and performance benefits of Grouper using a suite of example meshes and processing kernels.",
"title": "Grouper: A Compact, Streamable Triangle Mesh Data Structure",
"normalizedTitle": "Grouper: A Compact, Streamable Triangle Mesh Data Structure",
"fno": "ttg2014010084",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Random Processes",
"Mathematical Model",
"File Formats",
"Mesh Generation",
"Algorithm Design And Analysis",
"Large Meshes",
"Mesh Compression",
"Mesh Data Structures",
"Random Access",
"Out Of Core Algorithms"
],
"authors": [
{
"givenName": "Mark",
"surname": "Luffel",
"fullName": "Mark Luffel",
"affiliation": "Graphics, Visualization, & Usability Center (GVU), Georgia Inst. of Technol., Atlanta, GA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Topraj",
"surname": "Gurung",
"fullName": "Topraj Gurung",
"affiliation": "Graphics, Visualization, & Usability Center (GVU), Georgia Inst. of Technol., Atlanta, GA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Lindstrom",
"fullName": "Peter Lindstrom",
"affiliation": "Lawrence Livermore Nat. Lab., Lawrence, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jarek",
"surname": "Rossignac",
"fullName": "Jarek Rossignac",
"affiliation": "Graphics, Visualization, & Usability Center (GVU), Georgia Inst. of Technol., Atlanta, GA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "84-98",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/gmp/2002/1674/0/16740119",
"title": "Fair Triangle Mesh Generation with Discrete Elastica",
"doi": null,
"abstractUrl": "/proceedings-article/gmp/2002/16740119/12OmNAhxjFi",
"parentPublication": {
"id": "proceedings/gmp/2002/1674/0",
"title": "Geometric Modeling and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2008/3494/2/3494b414",
"title": "A Tetrahedral Mesh Generation Algorithm from Medical Images",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2008/3494b414/12OmNBUS7cC",
"parentPublication": {
"id": "proceedings/isise/2008/3494/2",
"title": "2008 International Symposium on Information Science and Engineering (ISISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsit/2008/3308/0/3308a669",
"title": "A Hybrid Approach to Surface Segmentation of Sparse Triangle Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/iccsit/2008/3308a669/12OmNBp52IP",
"parentPublication": {
"id": "proceedings/iccsit/2008/3308/0",
"title": "2008 International Conference on Computer Science and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1997/8262/0/82620379",
"title": "Smooth hierarchical surface triangulations",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620379/12OmNqH9hgj",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2004/2171/0/21710278",
"title": "Memory Efficient Adjacent Triangle Connectivity of a Vertex Using Triangle Strips",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2004/21710278/12OmNvA1h9i",
"parentPublication": {
"id": "proceedings/cgi/2004/2171/0",
"title": "Proceedings. Computer Graphics International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2000/0868/0/08680235",
"title": "Efficient Coding of Non-Triangular Mesh Connectivity",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2000/08680235/12OmNwdtwaQ",
"parentPublication": {
"id": "proceedings/pg/2000/0868/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2012/4899/0/4899a432",
"title": "2D Shape Manipulation Using Equilateral Triangle Mesh",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2012/4899a432/12OmNxETa4O",
"parentPublication": {
"id": "proceedings/icdh/2012/4899/0",
"title": "4th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vg/2005/26/0/01500538",
"title": "Robust generation of signed distance fields from triangle meshes",
"doi": null,
"abstractUrl": "/proceedings-article/vg/2005/01500538/12OmNzT7Otj",
"parentPublication": {
"id": "proceedings/vg/2005/26/0",
"title": "Volume Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/01/ttg2013010045",
"title": "Linear Correlations between Spatial and Normal Noise in Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2013/01/ttg2013010045/13rRUxASuGj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010070",
"articleId": "13rRUEgs2M1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010099",
"articleId": "13rRUxBrGgW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesPS",
"name": "ttg2014010084s.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010084s.pdf",
"extension": "pdf",
"size": "10.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBrGgW",
"doi": "10.1109/TVCG.2013.94",
"abstract": "Edge-based tracking is a fast and plausible approach for textureless 3D object tracking, but its robustness is still very challenging in highly cluttered backgrounds due to numerous local minima. To overcome this problem, we propose a novel method for fast and robust textureless 3D object tracking in highly cluttered backgrounds. The proposed method is based on optimal local searching of 3D-2D correspondences between a known 3D object model and 2D scene edges in an image with heavy background clutter. In our searching scheme, searching regions are partitioned into three levels (interior, contour, and exterior) with respect to the previous object region, and confident searching directions are determined by evaluating candidates of correspondences on their region levels; thus, the correspondences are searched among likely candidates in only the confident directions instead of searching through all candidates. To ensure the confident searching direction, we also adopt the region appearance, which is efficiently modeled on a newly defined local space (called a searching bundle). Experimental results and performance evaluations demonstrate that our method fully supports fast and robust textureless 3D object tracking even in highly cluttered backgrounds.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Edge-based tracking is a fast and plausible approach for textureless 3D object tracking, but its robustness is still very challenging in highly cluttered backgrounds due to numerous local minima. To overcome this problem, we propose a novel method for fast and robust textureless 3D object tracking in highly cluttered backgrounds. The proposed method is based on optimal local searching of 3D-2D correspondences between a known 3D object model and 2D scene edges in an image with heavy background clutter. In our searching scheme, searching regions are partitioned into three levels (interior, contour, and exterior) with respect to the previous object region, and confident searching directions are determined by evaluating candidates of correspondences on their region levels; thus, the correspondences are searched among likely candidates in only the confident directions instead of searching through all candidates. To ensure the confident searching direction, we also adopt the region appearance, which is efficiently modeled on a newly defined local space (called a searching bundle). Experimental results and performance evaluations demonstrate that our method fully supports fast and robust textureless 3D object tracking even in highly cluttered backgrounds.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Edge-based tracking is a fast and plausible approach for textureless 3D object tracking, but its robustness is still very challenging in highly cluttered backgrounds due to numerous local minima. To overcome this problem, we propose a novel method for fast and robust textureless 3D object tracking in highly cluttered backgrounds. The proposed method is based on optimal local searching of 3D-2D correspondences between a known 3D object model and 2D scene edges in an image with heavy background clutter. In our searching scheme, searching regions are partitioned into three levels (interior, contour, and exterior) with respect to the previous object region, and confident searching directions are determined by evaluating candidates of correspondences on their region levels; thus, the correspondences are searched among likely candidates in only the confident directions instead of searching through all candidates. To ensure the confident searching direction, we also adopt the region appearance, which is efficiently modeled on a newly defined local space (called a searching bundle). Experimental results and performance evaluations demonstrate that our method fully supports fast and robust textureless 3D object tracking even in highly cluttered backgrounds.",
"title": "Optimal Local Searching for Fast and Robust Textureless 3D Object Tracking in Highly Cluttered Backgrounds",
"normalizedTitle": "Optimal Local Searching for Fast and Robust Textureless 3D Object Tracking in Highly Cluttered Backgrounds",
"fno": "ttg2014010099",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Object Detection",
"Tracking",
"Edge Detection",
"Region Knowledge",
"Edge Based Tracking",
"Model Based Tracking",
"Background Clutter",
"Local Searching"
],
"authors": [
{
"givenName": null,
"surname": "Byung-Kuk Seo",
"fullName": "Byung-Kuk Seo",
"affiliation": "Dept. of Electron. & Comput. Eng., Hanyang Univ., Seoul, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Hanhoon Park",
"fullName": "Hanhoon Park",
"affiliation": "Dept. of Electron. Eng., Pukyong Nat. Univ., Busan, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Jong-Il Park",
"fullName": "Jong-Il Park",
"affiliation": "Dept. of Electron. & Comput. Eng., Hanyang Univ., Seoul, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stefan",
"surname": "Hinterstoisser",
"fullName": "Stefan Hinterstoisser",
"affiliation": "Dept. of Comput. Aided Med. Procedures (CAMP), Tech. Univ. Munchen, Garching, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Slobodan",
"surname": "Ilic",
"fullName": "Slobodan Ilic",
"affiliation": "Dept. of Comput. Aided Med. Procedures (CAMP), Tech. Univ. Munchen, Garching, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "99-110",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2011/9140/0/05771428",
"title": "Prop-free pointing detection in dynamic cluttered environments",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771428/12OmNAoUTky",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/1/7000a179",
"title": "Fast 3D Object Alignment from Depth Image with 3D Fourier Moment Matching on GPU",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a179/12OmNBOCWxW",
"parentPublication": {
"id": "3dv/2014/7000/1",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851f639",
"title": "Motion from Structure (MfS): Searching for 3D Objects in Cluttered Point Trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851f639/12OmNxA3Z6n",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851a562",
"title": "Detection and Accurate Localization of Circular Fiducials under Highly Challenging Conditions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851a562/12OmNyxXlju",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391e391",
"title": "A Novel Representation of Parts for Accurate 3D Object Detection and Tracking in Monocular Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391e391/12OmNzYNNab",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761614",
"title": "Double-edge-model based character stroke extraction from complex backgrounds",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761614/12OmNzd7bG3",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/11/06787078",
"title": "3D Object Recognition in Cluttered Scenes with Local Surface Features: A Survey",
"doi": null,
"abstractUrl": "/journal/tp/2014/11/06787078/13rRUxYrbVR",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2012/05/06042881",
"title": "Gradient Response Maps for Real-Time Detection of Textureless Objects",
"doi": null,
"abstractUrl": "/journal/tp/2012/05/06042881/13rRUxZRbpi",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a360",
"title": "6DoF Pose Estimation with Object Cutout based on a Deep Autoencoder",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a360/1gysmAGFrlC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c080",
"title": "Silhouette-Assisted 3D Object Instance Reconstruction from a Cluttered Scene",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c080/1i5mva7fXQ4",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010084",
"articleId": "13rRUxBa562",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010111",
"articleId": "13rRUyYSWkY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRMB",
"name": "ttg2014010099s.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010099s.zip",
"extension": "zip",
"size": "14.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYSWkY",
"doi": "10.1109/TVCG.2013.103",
"abstract": "Image resizing can be more effectively achieved with a better understanding of image semantics. In this paper, similar patterns that exist in many real-world images are analyzed. By interactively detecting similar objects in an image, the image content can be summarized rather than simply distorted or cropped. This method enables the manipulation of image pixels or patches as well as semantic objects in the scene during image resizing process. Given the special nature of similar objects in a general image, the integration of a novel object carving operator with the multi-operator framework is proposed for summarizing similar objects. The object removal sequence in the summarization strategy directly affects resizing quality. The method by which to evaluate the visual importance of the object as well as to optimally select the candidates for object carving is demonstrated. To achieve practical resizing applications for general images, a template matching-based method is developed. This method can detect similar objects even when they are of various colors, transformed in terms of perspective, or partially occluded. To validate the proposed method, comparisons with state-of-the-art resizing techniques and a user study were conducted. Convincing visual results are shown to demonstrate the effectiveness of the proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Image resizing can be more effectively achieved with a better understanding of image semantics. In this paper, similar patterns that exist in many real-world images are analyzed. By interactively detecting similar objects in an image, the image content can be summarized rather than simply distorted or cropped. This method enables the manipulation of image pixels or patches as well as semantic objects in the scene during image resizing process. Given the special nature of similar objects in a general image, the integration of a novel object carving operator with the multi-operator framework is proposed for summarizing similar objects. The object removal sequence in the summarization strategy directly affects resizing quality. The method by which to evaluate the visual importance of the object as well as to optimally select the candidates for object carving is demonstrated. To achieve practical resizing applications for general images, a template matching-based method is developed. This method can detect similar objects even when they are of various colors, transformed in terms of perspective, or partially occluded. To validate the proposed method, comparisons with state-of-the-art resizing techniques and a user study were conducted. Convincing visual results are shown to demonstrate the effectiveness of the proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image resizing can be more effectively achieved with a better understanding of image semantics. In this paper, similar patterns that exist in many real-world images are analyzed. By interactively detecting similar objects in an image, the image content can be summarized rather than simply distorted or cropped. This method enables the manipulation of image pixels or patches as well as semantic objects in the scene during image resizing process. Given the special nature of similar objects in a general image, the integration of a novel object carving operator with the multi-operator framework is proposed for summarizing similar objects. The object removal sequence in the summarization strategy directly affects resizing quality. The method by which to evaluate the visual importance of the object as well as to optimally select the candidates for object carving is demonstrated. To achieve practical resizing applications for general images, a template matching-based method is developed. This method can detect similar objects even when they are of various colors, transformed in terms of perspective, or partially occluded. To validate the proposed method, comparisons with state-of-the-art resizing techniques and a user study were conducted. Convincing visual results are shown to demonstrate the effectiveness of the proposed method.",
"title": "Summarization-Based Image Resizing by Intelligent Object Carving",
"normalizedTitle": "Summarization-Based Image Resizing by Intelligent Object Carving",
"fno": "ttg2014010111",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Image Color Analysis",
"Visualization",
"Semantics",
"Object Detection",
"Image Resolution",
"Feature Extraction",
"General",
"Shape",
"Image Color Analysis",
"Visualization",
"Semantics",
"Object Detection",
"Image Resolution",
"Feature Extraction",
"Image Representation"
],
"authors": [
{
"givenName": null,
"surname": "Weiming Dong",
"fullName": "Weiming Dong",
"affiliation": "Nat. Lab. of Pattern Recognition (NLPR), Inst. of Autom., Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Ning Zhou",
"fullName": "Ning Zhou",
"affiliation": "Nat. Lab. of Pattern Recognition (NLPR), Inst. of Autom., Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Tong-Yee Lee",
"fullName": "Tong-Yee Lee",
"affiliation": "Dept. of Comput. Sci. & Inf. Eng., Nat. Cheng-Kung Univ., Tainan, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Fuzhang Wu",
"fullName": "Fuzhang Wu",
"affiliation": "Nat. Lab. of Pattern Recognition (NLPR), Inst. of Autom., Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Yan Kong",
"fullName": "Yan Kong",
"affiliation": "Nat. Lab. of Pattern Recognition (NLPR), Inst. of Autom., Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Xiaopeng Zhang",
"fullName": "Xiaopeng Zhang",
"affiliation": "Nat. Lab. of Pattern Recognition (NLPR), Inst. of Autom., Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pdcat/2016/5081/0/07943389",
"title": "Accumulative Energy-Based Seam Carving for Image Resizing",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943389/12OmNAXglK1",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/007P1A07",
"title": "Scene warping: Layer-based stereoscopic image resizing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/007P1A07/12OmNAiFI8D",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2009/5949/0/05407481",
"title": "Fast seam carving using partial update and divide and conquer method",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2009/05407481/12OmNCbU33t",
"parentPublication": {
"id": "proceedings/isspit/2009/5949/0",
"title": "2009 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2008/3125/0/04906465",
"title": "Hyper-spectral content aware resizing",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2008/04906465/12OmNs0C9We",
"parentPublication": {
"id": "proceedings/aipr/2008/3125/0",
"title": "2008 37th IEEE Applied Imagery Pattern Recognition Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097861",
"title": "Optimizing Seam Carving on multi-GPU systems for real-time image resizing",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097861/12OmNsbGvDS",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2014/4311/0/4311a082",
"title": "Seam Carving for Color-Plus-Depth 3D Image",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2014/4311a082/12OmNwDj0Y7",
"parentPublication": {
"id": "proceedings/ism/2014/4311/0",
"title": "2014 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2012/4745/0/4745a596",
"title": "Image Resizing Based on Geometry Preservation with Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2012/4745a596/12OmNwdbV2X",
"parentPublication": {
"id": "proceedings/trustcom/2012/4745/0",
"title": "2012 IEEE 11th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607613",
"title": "Content-aware image resizing using perceptual seam carving with human attention model",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607613/12OmNxGALgl",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a050",
"title": "Reverse Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a050/12OmNxjjEkK",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815054",
"title": "Saliency-Aware Volume Data Resizing by Surface Carving",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815054/12OmNyUFfUQ",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010099",
"articleId": "13rRUxBrGgW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010125",
"articleId": "13rRUxNEqPT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesW9",
"name": "ttg2014010111s.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010111s.pdf",
"extension": "pdf",
"size": "11.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxNEqPT",
"doi": "10.1109/TVCG.2013.86",
"abstract": "Efficient text visualization in head-worn augmented reality (AR) displays is critical because it is sensitive to display technology, text style and color, ambient illumination and so on. The main problem for the developer is to know the optimal text style for the specific display and for applications where color coding must be strictly followed because it is regulated by laws or internal practices. In this work, we experimented the effects on readability of two head-worn devices (optical and video see-through), two backgrounds (light and dark), five colors (white, black, red, green, and blue), and two text styles (plain text and billboarded text). Font type and size were kept constant. We measured the performance of 15 subjects by collecting about 5,000 measurements using a specific test application and followed by qualitative interviews. Readability turned out to be quicker on the optical see-through device. For the video see-through device, background affects readability only in case of text without billboard. Finally, our tests suggest that a good combination for indoor augmented reality applications, regardless of device and background, could be white text and blue billboard, while a mandatory color should be displayed as billboard with a white text message.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Efficient text visualization in head-worn augmented reality (AR) displays is critical because it is sensitive to display technology, text style and color, ambient illumination and so on. The main problem for the developer is to know the optimal text style for the specific display and for applications where color coding must be strictly followed because it is regulated by laws or internal practices. In this work, we experimented the effects on readability of two head-worn devices (optical and video see-through), two backgrounds (light and dark), five colors (white, black, red, green, and blue), and two text styles (plain text and billboarded text). Font type and size were kept constant. We measured the performance of 15 subjects by collecting about 5,000 measurements using a specific test application and followed by qualitative interviews. Readability turned out to be quicker on the optical see-through device. For the video see-through device, background affects readability only in case of text without billboard. Finally, our tests suggest that a good combination for indoor augmented reality applications, regardless of device and background, could be white text and blue billboard, while a mandatory color should be displayed as billboard with a white text message.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Efficient text visualization in head-worn augmented reality (AR) displays is critical because it is sensitive to display technology, text style and color, ambient illumination and so on. The main problem for the developer is to know the optimal text style for the specific display and for applications where color coding must be strictly followed because it is regulated by laws or internal practices. In this work, we experimented the effects on readability of two head-worn devices (optical and video see-through), two backgrounds (light and dark), five colors (white, black, red, green, and blue), and two text styles (plain text and billboarded text). Font type and size were kept constant. We measured the performance of 15 subjects by collecting about 5,000 measurements using a specific test application and followed by qualitative interviews. Readability turned out to be quicker on the optical see-through device. For the video see-through device, background affects readability only in case of text without billboard. Finally, our tests suggest that a good combination for indoor augmented reality applications, regardless of device and background, could be white text and blue billboard, while a mandatory color should be displayed as billboard with a white text message.",
"title": "Text Readability in Head-Worn Displays: Color and Style Optimization in Video versus Optical See-Through Devices",
"normalizedTitle": "Text Readability in Head-Worn Displays: Color and Style Optimization in Video versus Optical See-Through Devices",
"fno": "ttg2014010125",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Optical Filters",
"Text Processing",
"Computer Vision",
"Vision I O",
"Augmented Reality",
"Optical See Through",
"Style Guides",
"Video See Through"
],
"authors": [
{
"givenName": "Saverio",
"surname": "Debernardis",
"fullName": "Saverio Debernardis",
"affiliation": "Dept. of Mech., Math. & Manage. (DMMM), Polytech. Inst. of Bari, Bari, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michele",
"surname": "Fiorentino",
"fullName": "Michele Fiorentino",
"affiliation": "Dept. of Mech., Math. & Manage. (DMMM), Polytech. Inst. of Bari, Bari, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michele",
"surname": "Gattullo",
"fullName": "Michele Gattullo",
"affiliation": "Dept. of Mech., Math. & Manage. (DMMM), Polytech. Inst. of Bari, Bari, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Giuseppe",
"surname": "Monno",
"fullName": "Giuseppe Monno",
"affiliation": "Dept. of Mech., Math. & Manage. (DMMM), Polytech. Inst. of Bari, Bari, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Antonio Emmanuele",
"surname": "Uva",
"fullName": "Antonio Emmanuele Uva",
"affiliation": "Dept. of Mech., Math. & Manage. (DMMM), Polytech. Inst. of Bari, Bari, Italy",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "125-139",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2017/2089/0/2089a033",
"title": "User Friendly Calibration for Tracking of Optical Stereo See-Through Head Worn Displays for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a033/12OmNAfPISE",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504749",
"title": "SharpView: Improved clarity of defocussed content on optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504749/12OmNBBhN9g",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2004/2191/0/21910016",
"title": "A Compact Optical See-Through Head-Worn Display with Occlusion Support",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2004/21910016/12OmNqFrGwu",
"parentPublication": {
"id": "proceedings/ismar/2004/2191/0",
"title": "Third IEEE and ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a202",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a202/12OmNqI04YU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a130",
"title": "[POSTER] Two-Step Gamut Mapping for Optical See-Through Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a130/12OmNvA1hoC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwar/1999/0359/0/03590075",
"title": "A Method for Calibrating See-Through Head-Mounted Displays for AR",
"doi": null,
"abstractUrl": "/proceedings-article/iwar/1999/03590075/12OmNxTVU20",
"parentPublication": {
"id": "proceedings/iwar/1999/0359/0",
"title": "Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446058",
"title": "User Preference for SharpView-Enhanced Virtual Text During Non-Fixated Viewing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446058/13bd1ftOBCG",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/05/06994851",
"title": "Effect of Text Outline and Contrast Polarity on AR Text Readability in Industrial Lighting",
"doi": null,
"abstractUrl": "/journal/tg/2015/05/06994851/13rRUwghd9a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/02/mcg2015020052",
"title": "Legibility in Industrial AR: Text Style, Color Coding, and Illuminance",
"doi": null,
"abstractUrl": "/magazine/cg/2015/02/mcg2015020052/13rRUxjQyxG",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a649",
"title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a649/1pysvKFdazS",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010111",
"articleId": "13rRUyYSWkY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010140",
"articleId": "13rRUwInvB6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvB6",
"doi": "10.1109/TVCG.2013.90",
"abstract": "We propose an approach for verification of volume rendering correctness based on an analysis of the volume rendering integral, the basis of most DVR algorithms. With respect to the most common discretization of this continuous model (Riemann summation), we make assumptions about the impact of parameter changes on the rendered results and derive convergence curves describing the expected behavior. Specifically, we progressively refine the number of samples along the ray, the grid size, and the pixel size, and evaluate how the errors observed during refinement compare against the expected approximation errors. We derive the theoretical foundations of our verification approach, explain how to realize it in practice, and discuss its limitations. We also report the errors identified by our approach when applied to two publicly available volume rendering packages.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose an approach for verification of volume rendering correctness based on an analysis of the volume rendering integral, the basis of most DVR algorithms. With respect to the most common discretization of this continuous model (Riemann summation), we make assumptions about the impact of parameter changes on the rendered results and derive convergence curves describing the expected behavior. Specifically, we progressively refine the number of samples along the ray, the grid size, and the pixel size, and evaluate how the errors observed during refinement compare against the expected approximation errors. We derive the theoretical foundations of our verification approach, explain how to realize it in practice, and discuss its limitations. We also report the errors identified by our approach when applied to two publicly available volume rendering packages.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose an approach for verification of volume rendering correctness based on an analysis of the volume rendering integral, the basis of most DVR algorithms. With respect to the most common discretization of this continuous model (Riemann summation), we make assumptions about the impact of parameter changes on the rendered results and derive convergence curves describing the expected behavior. Specifically, we progressively refine the number of samples along the ray, the grid size, and the pixel size, and evaluate how the errors observed during refinement compare against the expected approximation errors. We derive the theoretical foundations of our verification approach, explain how to realize it in practice, and discuss its limitations. We also report the errors identified by our approach when applied to two publicly available volume rendering packages.",
"title": "Verifying Volume Rendering Using Discretization Error Analysis",
"normalizedTitle": "Verifying Volume Rendering Using Discretization Error Analysis",
"fno": "ttg2014010140",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Volume Measurements",
"Error Analysis",
"Testing",
"Discretization Errors",
"Volume Rendering",
"Verifiable Visualization",
"Verification"
],
"authors": [
{
"givenName": "Tiago",
"surname": "Etiene",
"fullName": "Tiago Etiene",
"affiliation": "Sch. of Comput., Univ. of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel",
"surname": "Jonsson",
"fullName": "Daniel Jonsson",
"affiliation": "Campus Norrkoping, Linkopings Univ., Norrkoping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Timo",
"surname": "Ropinski",
"fullName": "Timo Ropinski",
"affiliation": "Campus Norrkoping, Linkopings Univ., Norrkoping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Carlos",
"surname": "Scheidegger",
"fullName": "Carlos Scheidegger",
"affiliation": "AT&T Labs.-Res., Florham Park, NJ, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joao L. D.",
"surname": "Comba",
"fullName": "Joao L. D. Comba",
"affiliation": "Univ. Fed. do Rio Grande do Sul, Porto Alegre, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Luis Gustavo",
"surname": "Nonato",
"fullName": "Luis Gustavo Nonato",
"affiliation": "Depto Mat. Aplic. e Estatistica-ICMC/USP, Univ. de Sao Paulo, Sao Carlos, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert M.",
"surname": "Kirby",
"fullName": "Robert M. Kirby",
"affiliation": "Sch. of Comput., Univ. of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anders",
"surname": "Ynnerman",
"fullName": "Anders Ynnerman",
"affiliation": "Campus Norrkoping, Linkopings Univ., Norrkoping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Claudio T.",
"surname": "Silva",
"fullName": "Claudio T. Silva",
"affiliation": "Center for Urban Sci. & Progress, New York Univ., Brooklyn, NY, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "140-154",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2003/2030/0/20300038",
"title": "Acceleration Techniques for GPU-based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300038/12OmNC2xhD8",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esiat/2009/3682/2/3682b575",
"title": "Rapid Texture-based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/esiat/2009/3682b575/12OmNx7G5VW",
"parentPublication": {
"id": "esiat/2009/3682/2",
"title": "Environmental Science and Information Application Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2015/7962/0/7962a017",
"title": "Accurate Volume Rendering Based on Adaptive Numerical Integration",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2015/7962a017/12OmNxaNGjy",
"parentPublication": {
"id": "proceedings/sibgrapi/2015/7962/0",
"title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660038",
"title": "Scale-Invariant Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660038/12OmNxb5hu0",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2000/6478/0/64780039",
"title": "Two-Level Volume Rendering-Fusing MIP and DVR",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780039/12OmNxzMnWP",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870011",
"title": "Interactive Maximum Projection Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870011/12OmNzZmZv2",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122364",
"title": "Historygrams: Enabling Interactive Global Illumination in Direct Volume Rendering using Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122364/13rRUyYjK5h",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/v1600",
"title": "Transform Coding for Hardware-accelerated Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/v1600/13rRUyeTVhV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122335",
"title": "Fuzzy Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122335/13rRUyeTVi0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2007/01/04020508",
"title": "Hypergraph-Partitioning-Based Remapping Models for Image-Space-Parallel Direct Volume Rendering of Unstructured Grids",
"doi": null,
"abstractUrl": "/journal/td/2007/01/04020508/13rRUygT7eL",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010125",
"articleId": "13rRUxNEqPT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010155",
"articleId": "13rRUyYSWkZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgDB",
"name": "ttg2014010140s2.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010140s2.pdf",
"extension": "pdf",
"size": "313 kB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXFgDA",
"name": "ttg2014010140s1.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010140s1.pdf",
"extension": "pdf",
"size": "49.5 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYSWkZ",
"doi": "10.1109/TVCG.2014.2",
"abstract": "The publication offers a note of thanks and lists its reviewers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The publication offers a note of thanks and lists its reviewers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The publication offers a note of thanks and lists its reviewers.",
"title": "2013 Reviewers List",
"normalizedTitle": "2013 Reviewers List",
"fno": "ttg2014010155",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"IEEE Publishing"
],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "155-158",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010140",
"articleId": "13rRUwInvB6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "tg13",
"articleId": "13rRUxBJhmT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBJhmT",
"doi": "10.1109/TVCG.2014.1",
"abstract": "This index covers all technical items - papers, correspondence, reviews, etc. - that appeared in this periodical during the year, and items from previous years that were commented upon or corrected in this year. Departments and other items may also be covered if they have been judged to have archival value. The Author Index contains the primary entry for each item, listed under the first author's name. The primary entry includes the co-authors' names, the title of the paper or other item, and its location, specified by the publication abbreviation, year, month, and inclusive pagination. The Subject Index contains entries describing the item under all appropriate subject headings, plus the first author's name, the publication abbreviation, month, and year, and inclusive pages. Note that the item title is found only under the primary entry in the Author Index.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This index covers all technical items - papers, correspondence, reviews, etc. - that appeared in this periodical during the year, and items from previous years that were commented upon or corrected in this year. Departments and other items may also be covered if they have been judged to have archival value. The Author Index contains the primary entry for each item, listed under the first author's name. The primary entry includes the co-authors' names, the title of the paper or other item, and its location, specified by the publication abbreviation, year, month, and inclusive pagination. The Subject Index contains entries describing the item under all appropriate subject headings, plus the first author's name, the publication abbreviation, month, and year, and inclusive pages. Note that the item title is found only under the primary entry in the Author Index.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This index covers all technical items - papers, correspondence, reviews, etc. - that appeared in this periodical during the year, and items from previous years that were commented upon or corrected in this year. Departments and other items may also be covered if they have been judged to have archival value. The Author Index contains the primary entry for each item, listed under the first author's name. The primary entry includes the co-authors' names, the title of the paper or other item, and its location, specified by the publication abbreviation, year, month, and inclusive pagination. The Subject Index contains entries describing the item under all appropriate subject headings, plus the first author's name, the publication abbreviation, month, and year, and inclusive pages. Note that the item title is found only under the primary entry in the Author Index.",
"title": "2013 Annual Index",
"normalizedTitle": "2013 Annual Index",
"fno": "tg13",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "not in print-not in print",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010155",
"articleId": "13rRUyYSWkZ",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa56a",
"doi": "10.1109/TVCG.2017.2738998",
"abstract": "Welcome to the November 2017 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>. This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Nantes, France, from September 9 to September 13, 2017.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Welcome to the November 2017 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>. This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Nantes, France, from September 9 to September 13, 2017.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Welcome to the November 2017 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Nantes, France, from September 9 to September 13, 2017.",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"fno": "08053887",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Augmented Reality"
],
"authors": [
{
"givenName": "Leila",
"surname": "De Floriani",
"fullName": "Leila De Floriani",
"affiliation": "University of Maryland, College Park, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "Graz University of Technology, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2365-2365",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09052628",
"title": "Introducing the IEEE Virtual Reality 2020 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09052628/1iFLKo4ODvO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09405571",
"title": "Introducing the IEEE Virtual Reality 2021 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09405571/1sP18PmVuQU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591457",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591457/1y2Fxh3IZDG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "08007327",
"articleId": "13rRUyft7D7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyft7D7",
"doi": "10.1109/TVCG.2017.2734327",
"abstract": "Sonic interaction as a technique for conveying information has advantages over conventional visual augmented reality methods specially when augmenting the visual field with extra information brings distraction. Sonification of knowledge extracted by applying computational methods to sensory data is a well-established concept. However, some aspects of sonic interaction design such as aesthetics, the cognitive effort required for perceiving information, and avoiding alarm fatigue are not well studied in literature. In this work, we present a sonification scheme based on employment of physical modeling sound synthesis which targets focus demanding tasks requiring extreme precision. Proposed mapping techniques are designed to require minimum training for users to adapt to and minimum mental effort to interpret the conveyed information. Two experiments are conducted to assess the feasibility of the proposed method and compare it against visual augmented reality in high precision tasks. The observed quantitative results suggest that utilizing sound patches generated by physical modeling achieve the desired goal of improving the user experience and general task performance with minimal training.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Sonic interaction as a technique for conveying information has advantages over conventional visual augmented reality methods specially when augmenting the visual field with extra information brings distraction. Sonification of knowledge extracted by applying computational methods to sensory data is a well-established concept. However, some aspects of sonic interaction design such as aesthetics, the cognitive effort required for perceiving information, and avoiding alarm fatigue are not well studied in literature. In this work, we present a sonification scheme based on employment of physical modeling sound synthesis which targets focus demanding tasks requiring extreme precision. Proposed mapping techniques are designed to require minimum training for users to adapt to and minimum mental effort to interpret the conveyed information. Two experiments are conducted to assess the feasibility of the proposed method and compare it against visual augmented reality in high precision tasks. The observed quantitative results suggest that utilizing sound patches generated by physical modeling achieve the desired goal of improving the user experience and general task performance with minimal training.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Sonic interaction as a technique for conveying information has advantages over conventional visual augmented reality methods specially when augmenting the visual field with extra information brings distraction. Sonification of knowledge extracted by applying computational methods to sensory data is a well-established concept. However, some aspects of sonic interaction design such as aesthetics, the cognitive effort required for perceiving information, and avoiding alarm fatigue are not well studied in literature. In this work, we present a sonification scheme based on employment of physical modeling sound synthesis which targets focus demanding tasks requiring extreme precision. Proposed mapping techniques are designed to require minimum training for users to adapt to and minimum mental effort to interpret the conveyed information. Two experiments are conducted to assess the feasibility of the proposed method and compare it against visual augmented reality in high precision tasks. The observed quantitative results suggest that utilizing sound patches generated by physical modeling achieve the desired goal of improving the user experience and general task performance with minimal training.",
"title": "SonifEye: Sonification of Visual Information Using Physical Modeling Sound Synthesis",
"normalizedTitle": "SonifEye: Sonification of Visual Information Using Physical Modeling Sound Synthesis",
"fno": "08007327",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Data Visualisation",
"Sonif Eye",
"Visual Information",
"Sound Patches",
"Conveyed Information",
"Minimum Mental Effort",
"Mapping Techniques",
"Sonification Scheme",
"Cognitive Effort",
"Sonic Interaction Design",
"Sensory Data",
"Computational Methods",
"Visual Field",
"Conventional Visual Augmented Reality Methods",
"Physical Modeling Sound Synthesis",
"Visualization",
"Mathematical Model",
"Augmented Reality",
"Load Modeling",
"Auditory Displays",
"Acceleration",
"Computational Modeling",
"Aural Augmented Reality",
"Sonification",
"Sonic Interaction",
"Auditory Feedback"
],
"authors": [
{
"givenName": "Hessam",
"surname": "Roodaki",
"fullName": "Hessam Roodaki",
"affiliation": "Chair for Computer Aided Medical Procedures, Technische Universität München, Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Navid",
"surname": "Navab",
"fullName": "Navid Navab",
"affiliation": "Topological Media Lab, Concordia University, Montreal, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Abouzar",
"surname": "Eslami",
"fullName": "Abouzar Eslami",
"affiliation": "Carl Zeiss Meditec AG, Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christopher",
"surname": "Stapleton",
"fullName": "Christopher Stapleton",
"affiliation": "Simiosys Real World Laboratory, Oviedo, FL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nassir",
"surname": "Navab",
"fullName": "Nassir Navab",
"affiliation": "Computer Aided Medical Procedures, Johns Hopkins University, Baltimore, MD, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2366-2371",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/sive/2014/5781/0/07006288",
"title": "Reproducible sonification for virtual navigation",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2014/07006288/12OmNAtaS0G",
"parentPublication": {
"id": "proceedings/sive/2014/5781/0",
"title": "2014 IEEE VR Workshop: Sonic Interaction in Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpc/2009/3998/0/05090035",
"title": "Sonification design guidelines to enhance program comprehension",
"doi": null,
"abstractUrl": "/proceedings-article/icpc/2009/05090035/12OmNCcbE84",
"parentPublication": {
"id": "proceedings/icpc/2009/3998/0",
"title": "2009 IEEE 17th International Conference on Program Comprehension (ICPC 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/varms-ieeevr/2015/6926/0/07151725",
"title": "Enhancing visualization of molecular simulations using sonification",
"doi": null,
"abstractUrl": "/proceedings-article/varms-ieeevr/2015/07151725/12OmNx0A7CV",
"parentPublication": {
"id": "proceedings/varms-ieeevr/2015/6926/0",
"title": "2015 IEEE 1st International Workshop on Virtual and Augmented Reality for Molecular Science (VARMS@IEEEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892327",
"title": "Sound design in virtual reality concert experiences using a wave field synthesis approach",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892327/12OmNxjjEbg",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2002/04/mcg2002040038",
"title": "Synthesizing Sound Textures through Wavelet Tree Learning",
"doi": null,
"abstractUrl": "/magazine/cg/2002/04/mcg2002040038/13rRUNvyanm",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2015/01/mmu2015010048",
"title": "Sonification of Surface Tapping Changes Behavior, Surface Perception, and Emotion",
"doi": null,
"abstractUrl": "/magazine/mu/2015/01/mmu2015010048/13rRUwd9CIo",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2005/02/u2026",
"title": "Interactive Sonification of Choropleth Maps",
"doi": null,
"abstractUrl": "/magazine/mu/2005/02/u2026/13rRUxjQyrR",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/1999/04/c4048",
"title": "Data Sonification and Sound Visualization",
"doi": null,
"abstractUrl": "/magazine/cs/1999/04/c4048/13rRUy08MzR",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577080",
"title": "Quantum: An art-science case study on sonification and sound design in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577080/17D45We0UEe",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08053887",
"articleId": "13rRUxBa56a",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007246",
"articleId": "13rRUwh80Hj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwh80Hj",
"doi": "10.1109/TVCG.2017.2735078",
"abstract": "Does it feel the same when you touch an object in Augmented Reality (AR) or in Virtual Reality (VR)? In this paper we study and compare the haptic perception of stiffness of a virtual object in two situations: (1) a purely virtual environment versus (2) a real and augmented environment. We have designed an experimental setup based on a Microsoft HoloLens and a haptic force-feedback device, enabling to press a virtual piston, and compare its stiffness successively in either Augmented Reality (the virtual piston is surrounded by several real objects all located inside a cardboard box) or in Virtual Reality (the same virtual piston is displayed in a fully virtual scene composed of the same other objects). We have conducted a psychophysical experiment with 12 participants. Our results show a surprising bias in perception between the two conditions. The virtual piston is on average perceived stiffer in the VR condition compared to the AR condition. For instance, when the piston had the same stiffness in AR and VR, participants would select the VR piston as the stiffer one in 60% of cases. This suggests a psychological effect as if objects in AR would feel ”softer” than in pure VR. Taken together, our results open new perspectives on perception in AR versus VR, and pave the way to future studies aiming at characterizing potential perceptual biases.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Does it feel the same when you touch an object in Augmented Reality (AR) or in Virtual Reality (VR)? In this paper we study and compare the haptic perception of stiffness of a virtual object in two situations: (1) a purely virtual environment versus (2) a real and augmented environment. We have designed an experimental setup based on a Microsoft HoloLens and a haptic force-feedback device, enabling to press a virtual piston, and compare its stiffness successively in either Augmented Reality (the virtual piston is surrounded by several real objects all located inside a cardboard box) or in Virtual Reality (the same virtual piston is displayed in a fully virtual scene composed of the same other objects). We have conducted a psychophysical experiment with 12 participants. Our results show a surprising bias in perception between the two conditions. The virtual piston is on average perceived stiffer in the VR condition compared to the AR condition. For instance, when the piston had the same stiffness in AR and VR, participants would select the VR piston as the stiffer one in 60% of cases. This suggests a psychological effect as if objects in AR would feel ”softer” than in pure VR. Taken together, our results open new perspectives on perception in AR versus VR, and pave the way to future studies aiming at characterizing potential perceptual biases.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Does it feel the same when you touch an object in Augmented Reality (AR) or in Virtual Reality (VR)? In this paper we study and compare the haptic perception of stiffness of a virtual object in two situations: (1) a purely virtual environment versus (2) a real and augmented environment. We have designed an experimental setup based on a Microsoft HoloLens and a haptic force-feedback device, enabling to press a virtual piston, and compare its stiffness successively in either Augmented Reality (the virtual piston is surrounded by several real objects all located inside a cardboard box) or in Virtual Reality (the same virtual piston is displayed in a fully virtual scene composed of the same other objects). We have conducted a psychophysical experiment with 12 participants. Our results show a surprising bias in perception between the two conditions. The virtual piston is on average perceived stiffer in the VR condition compared to the AR condition. For instance, when the piston had the same stiffness in AR and VR, participants would select the VR piston as the stiffer one in 60% of cases. This suggests a psychological effect as if objects in AR would feel ”softer” than in pure VR. Taken together, our results open new perspectives on perception in AR versus VR, and pave the way to future studies aiming at characterizing potential perceptual biases.",
"title": "AR Feels “Softer” than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality",
"normalizedTitle": "AR Feels “Softer” than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality",
"fno": "08007246",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Force Feedback",
"Haptic Interfaces",
"Pistons",
"Psychology",
"VR Piston",
"Pure VR",
"Haptic Perception",
"Virtual Reality",
"Augmented Reality",
"Virtual Object",
"Purely Virtual Environment",
"Real Environment",
"Augmented Environment",
"Haptic Force Feedback Device",
"Virtual Piston",
"Fully Virtual Scene",
"VR Condition",
"Microsoft Holo Lens",
"Psychological Effect",
"Pistons",
"Haptic Interfaces",
"Visualization",
"Augmented Reality",
"Virtual Reality",
"Virtual Environments",
"Physiology",
"Psychology",
"Augmented Reality",
"Virtual Reality",
"Haptic",
"Perception",
"Stiffness",
"Psychophysical Study"
],
"authors": [
{
"givenName": "Yoren",
"surname": "Gaffary",
"fullName": "Yoren Gaffary",
"affiliation": "InriaIRISA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Benoît",
"surname": "Le Gouis",
"fullName": "Benoît Le Gouis",
"affiliation": "INSA RennesIRISA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maud",
"surname": "Marchal",
"fullName": "Maud Marchal",
"affiliation": "INSA RennesIRISA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ferran",
"surname": "Argelaguet",
"fullName": "Ferran Argelaguet",
"affiliation": "InriaIRISA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bruno",
"surname": "Arnaldi",
"fullName": "Bruno Arnaldi",
"affiliation": "INSA RennesIRISA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anatole",
"surname": "Lécuyer",
"fullName": "Anatole Lécuyer",
"affiliation": "InriaIRISA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2372-2377",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/1999/0210/0/02100032",
"title": "Virtual Reality and Augmented Reality as a Training Tool for Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100032/12OmNAObbyR",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2012/4725/0/4725a116",
"title": "From VR to AR: Adding AR Functionality to an Existing VR Software Framework",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2012/4725a116/12OmNAYoKsE",
"parentPublication": {
"id": "proceedings/svr/2012/4725/0",
"title": "2012 14th Symposium on Virtual and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2002/1492/0/14920287",
"title": "Tinmith-Hand: Unified User Interface Technology for Mobile Outdoor Augmented Reality and Indoor Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920287/12OmNqH9htu",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444645",
"title": "Stiffness modulation for Haptic Augmented Reality: Extension to 3D interaction",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444645/12OmNwGZNQB",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a253",
"title": "Workshop on VR and AR meet creative industries",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a253/12OmNylKASp",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446280",
"title": "Enhancing the Stiffness Perception of Tangible Objects in Mixed Reality Using Wearable Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446280/13bd1AIBM2a",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446053",
"title": "High-Fidelity Interaction for Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446053/13bd1tl2omt",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798174",
"title": "Comparison in Depth Perception between Virtual Reality and Augmented Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798174/1cJ11OY78k0",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2023/01/09258960",
"title": "Touching Virtual Humans: Haptic Responses Reveal the Emotional Impact of Affective Agents",
"doi": null,
"abstractUrl": "/journal/ta/2023/01/09258960/1oIW8klCOiY",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09495125",
"title": "Being an Avatar “for Real”: A Survey on Virtual Embodiment in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09495125/1vyju4jl6AE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007327",
"articleId": "13rRUyft7D7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007333",
"articleId": "13rRUygT7fg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXnFts",
"name": "ttg201711-08007246s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007246s1.zip",
"extension": "zip",
"size": "45.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygT7fg",
"doi": "10.1109/TVCG.2017.2735098",
"abstract": "This paper presents the results of two cognitive load studies comparing three augmented reality display technologies: spatial augmented reality, the optical see-through Microsoft HoloLens, and the video see-through Samsung Gear VR. In particular, the two experiments focused on isolating the cognitive load cost of receiving instructions for a button-pressing procedural task. The studies employed a self-assessment cognitive load methodology, as well as an additional dual-task cognitive load methodology. The results showed that spatial augmented reality led to increased performance and reduced cognitive load. Additionally, it was discovered that a limited field of view can introduce increased cognitive load requirements. The findings suggest that some of the inherent restrictions of head-mounted displays materialize as increased user cognitive load.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents the results of two cognitive load studies comparing three augmented reality display technologies: spatial augmented reality, the optical see-through Microsoft HoloLens, and the video see-through Samsung Gear VR. In particular, the two experiments focused on isolating the cognitive load cost of receiving instructions for a button-pressing procedural task. The studies employed a self-assessment cognitive load methodology, as well as an additional dual-task cognitive load methodology. The results showed that spatial augmented reality led to increased performance and reduced cognitive load. Additionally, it was discovered that a limited field of view can introduce increased cognitive load requirements. The findings suggest that some of the inherent restrictions of head-mounted displays materialize as increased user cognitive load.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents the results of two cognitive load studies comparing three augmented reality display technologies: spatial augmented reality, the optical see-through Microsoft HoloLens, and the video see-through Samsung Gear VR. In particular, the two experiments focused on isolating the cognitive load cost of receiving instructions for a button-pressing procedural task. The studies employed a self-assessment cognitive load methodology, as well as an additional dual-task cognitive load methodology. The results showed that spatial augmented reality led to increased performance and reduced cognitive load. Additionally, it was discovered that a limited field of view can introduce increased cognitive load requirements. The findings suggest that some of the inherent restrictions of head-mounted displays materialize as increased user cognitive load.",
"title": "Cognitive Cost of Using Augmented Reality Displays",
"normalizedTitle": "Cognitive Cost of Using Augmented Reality Displays",
"fno": "08007333",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Cognition",
"Helmet Mounted Displays",
"Wearable Computers",
"Cognitive Load Cost",
"Button Pressing Procedural Task",
"Self Assessment Cognitive Load Methodology",
"Spatial Augmented Reality",
"Increased Cognitive Load Requirements",
"Augmented Reality Displays",
"Microsoft Holo Lens",
"Samsung Gear VR",
"Dual Task Cognitive Load Methodology",
"User Cognitive Load",
"Head Mounted Displays",
"Human Computer Interaction",
"Augmented Reality",
"Cognition",
"Monitoring",
"Mobile Communication",
"Training",
"Visual Perception",
"Augmented Reality",
"Human Computer Interaction",
"Cognitive Load"
],
"authors": [
{
"givenName": "James",
"surname": "Baumeister",
"fullName": "James Baumeister",
"affiliation": "Wearable Computer Lab",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Seung Youb",
"surname": "Ssin",
"fullName": "Seung Youb Ssin",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Neven A. M.",
"surname": "ElSayed",
"fullName": "Neven A. M. ElSayed",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jillian",
"surname": "Dorrian",
"fullName": "Jillian Dorrian",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "David P.",
"surname": "Webb",
"fullName": "David P. Webb",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "James A.",
"surname": "Walsh",
"fullName": "James A. Walsh",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Timothy M.",
"surname": "Simon",
"fullName": "Timothy M. Simon",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andrew",
"surname": "Irlitti",
"fullName": "Andrew Irlitti",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ross T.",
"surname": "Smith",
"fullName": "Ross T. Smith",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mark",
"surname": "Kohler",
"fullName": "Mark Kohler",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bruce H.",
"surname": "Thomas",
"fullName": "Bruce H. Thomas",
"affiliation": "Wearable Computer Lab",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2378-2388",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/Ismar-mashd/2015/9628/0/9628a006",
"title": "The Effect of Tangible User Interfaces on Cognitive Load in the Creative Design Process",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2015/9628a006/12OmNC3Xhjl",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2015/9628/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2002/1781/0/17810215",
"title": "Augmented-Reality Visualizations Guided by Cognition:Perceptual Heuristics for Combining Visible and Obscured Information",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2002/17810215/12OmNrMZpxI",
"parentPublication": {
"id": "proceedings/ismar/2002/1781/0",
"title": "Proceedings. International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a049",
"title": "The Ventriloquist Effect in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a049/12OmNvAiSE1",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/u-media/2011/4493/0/4493a253",
"title": "The Feasibility of Augmented Reality on Virtual Tourism Website",
"doi": null,
"abstractUrl": "/proceedings-article/u-media/2011/4493a253/12OmNvnOwyH",
"parentPublication": {
"id": "proceedings/u-media/2011/4493/0",
"title": "International Conference on Ubi-Media Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isar/2001/1375/0/13750114",
"title": "Mobile Collaborative Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/isar/2001/13750114/12OmNxFaLwB",
"parentPublication": {
"id": "proceedings/isar/2001/1375/0",
"title": "Proceedings IEEE and ACM International Symposium on Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/euc/2014/5249/0/5249a287",
"title": "Mobile Augmented Reality System for Marine Navigation Assistance",
"doi": null,
"abstractUrl": "/proceedings-article/euc/2014/5249a287/12OmNxxdZFo",
"parentPublication": {
"id": "proceedings/euc/2014/5249/0",
"title": "2014 12th IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539195",
"title": "Haptics in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539195/12OmNyQ7G3s",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2012/07/mco2012070026",
"title": "Anywhere Interfaces Using Handheld Augmented Reality",
"doi": null,
"abstractUrl": "/magazine/co/2012/07/mco2012070026/13rRUxYrbPM",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a329",
"title": "Anthropomorphism of Virtual Agents and Human Cognitive Performance in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a329/1CJdRqHEpry",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2019/4288/0/428800a239",
"title": "Impacts of Different Types of Scaffolding on Academic Performance, Cognitive Load and Satisfaction in Scientific Inquiry Activities Based on Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2019/428800a239/1fHkRE8QKd2",
"parentPublication": {
"id": "proceedings/eitt/2019/4288/0",
"title": "2019 Eighth International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007246",
"articleId": "13rRUwh80Hj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007295",
"articleId": "13rRUNvgz4n",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvgz4n",
"doi": "10.1109/TVCG.2017.2734458",
"abstract": "Visual SLAM is one of the key technologies to align the virtual and real world together in Augmented Reality applications. RGBD dense Visual SLAM approaches have shown their advantages in robustness and accuracy in recent years. However, there are still several challenges such as the inconsistencies in RGBD measurements across multiple frames that could jeopardize the accuracy of both camera trajectory and scene reconstruction. In this paper, we propose a novel map representation called Probabilistic Surfel Map (PSM) for dense visual SLAM. The main idea is to maintain a globally consistent map with both photometric and geometric uncertainties encoded in order to address the inconsistency issue. The key of our PSM is proper modeling and updating of sensor measurement uncertainties, as well as the strategies to apply them for improving both the front-end pose estimation and the back-end optimization. Experimental results on publicly available datasets demonstrate major improvements with our approach over the state-of-the-art methods. Specifically, comparing with σ-DVO, we achieve a 40% reduction in absolute trajectory error and an 18% reduction in relative pose error in visual odometry, as well as an 8.5% reduction in absolute trajectory error in complete SLAM. Moreover, our PSM enables generation of a high quality dense point cloud with comparable accuracy as the state-of-the-art approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual SLAM is one of the key technologies to align the virtual and real world together in Augmented Reality applications. RGBD dense Visual SLAM approaches have shown their advantages in robustness and accuracy in recent years. However, there are still several challenges such as the inconsistencies in RGBD measurements across multiple frames that could jeopardize the accuracy of both camera trajectory and scene reconstruction. In this paper, we propose a novel map representation called Probabilistic Surfel Map (PSM) for dense visual SLAM. The main idea is to maintain a globally consistent map with both photometric and geometric uncertainties encoded in order to address the inconsistency issue. The key of our PSM is proper modeling and updating of sensor measurement uncertainties, as well as the strategies to apply them for improving both the front-end pose estimation and the back-end optimization. Experimental results on publicly available datasets demonstrate major improvements with our approach over the state-of-the-art methods. Specifically, comparing with σ-DVO, we achieve a 40% reduction in absolute trajectory error and an 18% reduction in relative pose error in visual odometry, as well as an 8.5% reduction in absolute trajectory error in complete SLAM. Moreover, our PSM enables generation of a high quality dense point cloud with comparable accuracy as the state-of-the-art approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual SLAM is one of the key technologies to align the virtual and real world together in Augmented Reality applications. RGBD dense Visual SLAM approaches have shown their advantages in robustness and accuracy in recent years. However, there are still several challenges such as the inconsistencies in RGBD measurements across multiple frames that could jeopardize the accuracy of both camera trajectory and scene reconstruction. In this paper, we propose a novel map representation called Probabilistic Surfel Map (PSM) for dense visual SLAM. The main idea is to maintain a globally consistent map with both photometric and geometric uncertainties encoded in order to address the inconsistency issue. The key of our PSM is proper modeling and updating of sensor measurement uncertainties, as well as the strategies to apply them for improving both the front-end pose estimation and the back-end optimization. Experimental results on publicly available datasets demonstrate major improvements with our approach over the state-of-the-art methods. Specifically, comparing with σ-DVO, we achieve a 40% reduction in absolute trajectory error and an 18% reduction in relative pose error in visual odometry, as well as an 8.5% reduction in absolute trajectory error in complete SLAM. Moreover, our PSM enables generation of a high quality dense point cloud with comparable accuracy as the state-of-the-art approach.",
"title": "Dense Visual SLAM with Probabilistic Surfel Map",
"normalizedTitle": "Dense Visual SLAM with Probabilistic Surfel Map",
"fno": "08007295",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Cameras",
"Distance Measurement",
"Image Reconstruction",
"Image Representation",
"Pose Estimation",
"Robot Vision",
"SLAM Robots",
"Augmented Reality Applications",
"RGBD Dense Visual SLAM Approaches",
"RGBD Measurements",
"Scene Reconstruction",
"Probabilistic Surfel Map",
"PSM",
"Globally Consistent Map",
"Photometric Uncertainties",
"Geometric Uncertainties",
"Sensor Measurement Uncertainties",
"Absolute Trajectory Error",
"Visual Odometry",
"High Quality Dense Point Cloud",
"Camera Trajectory",
"Map Representation",
"Front End Pose Estimation",
"Back End Optimization",
"Simultaneously Localization And Mapping",
"Visualization",
"Simultaneous Localization And Mapping",
"Three Dimensional Displays",
"Cameras",
"Image Reconstruction",
"Trajectory",
"Augmented Reality",
"Object Tracking",
"Visual SLAM",
"Dense Visual Odometry",
"RGBD 6 Do F Tracking",
"3 D Reconstruction",
"Augmented Reality"
],
"authors": [
{
"givenName": "Zhixin",
"surname": "Yan",
"fullName": "Zhixin Yan",
"affiliation": "Bosch Research North America, Palo Alto, CA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mao",
"surname": "Ye",
"fullName": "Mao Ye",
"affiliation": "Bosch Research North America, Palo Alto, CA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Liu",
"surname": "Ren",
"fullName": "Liu Ren",
"affiliation": "Bosch Research North America, Palo Alto, CA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2389-2398",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/case/2012/0430/0/06386480",
"title": "Fast randomized planner for SLAM automation",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386480/12OmNqzu6R9",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948422",
"title": "Dense planar SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948422/12OmNx7G661",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457g565",
"title": "CNN-SLAM: Real-Time Dense Monocular SLAM with Learned Depth Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g565/12OmNzTH0Qa",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceic/2021/0212/0/021200a081",
"title": "IDMC-VSLAM: Improved dense map construction and visual SLAM in dynamic environments",
"doi": null,
"abstractUrl": "/proceedings-article/icceic/2021/021200a081/1AFsC5XRpMk",
"parentPublication": {
"id": "proceedings/icceic/2021/0212/0",
"title": "2021 2nd International Conference on Computer Engineering and Intelligent Control (ICCEIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d096",
"title": "Probabilistic Volumetric Fusion for Dense Monocular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d096/1L8qEHGGTlu",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asap/2019/1601/0/160100a083",
"title": "FPGA Architectures for Real-time Dense SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/asap/2019/160100a083/1d5kDsB2fdu",
"parentPublication": {
"id": "proceedings/asap/2019/1601/2160-052X",
"title": "2019 IEEE 30th International Conference on Application-specific Systems, Architectures and Processors (ASAP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pact/2019/3613/0/361300a296",
"title": "SLAMBooster: An Application-Aware Online Controller for Approximation in Dense SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/pact/2019/361300a296/1eLy3QnWKuA",
"parentPublication": {
"id": "proceedings/pact/2019/3613/0",
"title": "2019 28th International Conference on Parallel Architectures and Compilation Techniques (PACT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300a134",
"title": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300a134/1gyr8GIX9E4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300f854",
"title": "Learning Meshes for Dense Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300f854/1hVlbuejvUI",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icwcsg/2021/2598/0/259800a327",
"title": "Visual SLAM algorithm based on RGB-D",
"doi": null,
"abstractUrl": "/proceedings-article/icwcsg/2021/259800a327/1yQB8ogDPRm",
"parentPublication": {
"id": "proceedings/icwcsg/2021/2598/0",
"title": "2021 International Conference on Wireless Communications and Smart Grid (ICWCSG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007333",
"articleId": "13rRUygT7fg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007238",
"articleId": "13rRUwInvl5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgOM",
"name": "ttg201711-08007295s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007295s1.zip",
"extension": "zip",
"size": "69.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvl5",
"doi": "10.1109/TVCG.2017.2734539",
"abstract": "3D object temporal trackers estimate the 3D rotation and 3D translation of a rigid object by propagating the transformation from one frame to the next. To confront this task, algorithms either learn the transformation between two consecutive frames or optimize an energy function to align the object to the scene. The motivation behind our approach stems from a consideration on the nature of learners and optimizers. Throughout the evaluation of different types of objects and working conditions, we observe their complementary nature — on one hand, learners are more robust when undergoing challenging scenarios, while optimizers are prone to tracking failures due to the entrapment at local minima; on the other, optimizers can converge to a better accuracy and minimize jitter. Therefore, we propose to bridge the gap between learners and optimizers to attain a robust and accurate RGB-D temporal tracker that runs at approximately 2 ms per frame using one CPU core. Our work is highly suitable for Augmented Reality (AR), Mixed Reality (MR) and Virtual Reality (VR) applications due to its robustness, accuracy, efficiency and low latency. Aiming at stepping beyond the simple scenarios used by current systems, often constrained by having a single object in the absence of clutter, averting to touch the object to prevent close-range partial occlusion or selecting brightly colored objects to easily segment them individually, we demonstrate the capacity to handle challenging cases under clutter, partial occlusion and varying lighting conditions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D object temporal trackers estimate the 3D rotation and 3D translation of a rigid object by propagating the transformation from one frame to the next. To confront this task, algorithms either learn the transformation between two consecutive frames or optimize an energy function to align the object to the scene. The motivation behind our approach stems from a consideration on the nature of learners and optimizers. Throughout the evaluation of different types of objects and working conditions, we observe their complementary nature — on one hand, learners are more robust when undergoing challenging scenarios, while optimizers are prone to tracking failures due to the entrapment at local minima; on the other, optimizers can converge to a better accuracy and minimize jitter. Therefore, we propose to bridge the gap between learners and optimizers to attain a robust and accurate RGB-D temporal tracker that runs at approximately 2 ms per frame using one CPU core. Our work is highly suitable for Augmented Reality (AR), Mixed Reality (MR) and Virtual Reality (VR) applications due to its robustness, accuracy, efficiency and low latency. Aiming at stepping beyond the simple scenarios used by current systems, often constrained by having a single object in the absence of clutter, averting to touch the object to prevent close-range partial occlusion or selecting brightly colored objects to easily segment them individually, we demonstrate the capacity to handle challenging cases under clutter, partial occlusion and varying lighting conditions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D object temporal trackers estimate the 3D rotation and 3D translation of a rigid object by propagating the transformation from one frame to the next. To confront this task, algorithms either learn the transformation between two consecutive frames or optimize an energy function to align the object to the scene. The motivation behind our approach stems from a consideration on the nature of learners and optimizers. Throughout the evaluation of different types of objects and working conditions, we observe their complementary nature — on one hand, learners are more robust when undergoing challenging scenarios, while optimizers are prone to tracking failures due to the entrapment at local minima; on the other, optimizers can converge to a better accuracy and minimize jitter. Therefore, we propose to bridge the gap between learners and optimizers to attain a robust and accurate RGB-D temporal tracker that runs at approximately 2 ms per frame using one CPU core. Our work is highly suitable for Augmented Reality (AR), Mixed Reality (MR) and Virtual Reality (VR) applications due to its robustness, accuracy, efficiency and low latency. Aiming at stepping beyond the simple scenarios used by current systems, often constrained by having a single object in the absence of clutter, averting to touch the object to prevent close-range partial occlusion or selecting brightly colored objects to easily segment them individually, we demonstrate the capacity to handle challenging cases under clutter, partial occlusion and varying lighting conditions.",
"title": "Looking Beyond the Simple Scenarios: Combining Learners and Optimizers in 3D Temporal Tracking",
"normalizedTitle": "Looking Beyond the Simple Scenarios: Combining Learners and Optimizers in 3D Temporal Tracking",
"fno": "08007238",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Robustness",
"Three Dimensional Displays",
"Tracking",
"Cameras",
"Solid Modeling",
"Clutter",
"Iterative Closest Point Algorithm",
"3 D Tracking",
"Random Forest",
"6 D Pose Estimation"
],
"authors": [
{
"givenName": "David Joseph",
"surname": "Tan",
"fullName": "David Joseph Tan",
"affiliation": "Technische Universität München",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nassir",
"surname": "Navab",
"fullName": "Nassir Navab",
"affiliation": "Technische Universität München",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Federico",
"surname": "Tombari",
"fullName": "Federico Tombari",
"affiliation": "Technische Universität München",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2399-2409",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549351",
"title": "An advanced interaction framework for augmented reality based exposure treatment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549351/12OmNAgGwdn",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130357",
"title": "Real-time multi-person tracking with detector assisted structure propagation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130357/12OmNqIzh3f",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034b284",
"title": "Real-Time Hand Tracking Under Occlusion from an Egocentric RGB-D Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034b284/12OmNrY3LA8",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b163",
"title": "Real-Time Hand Tracking under Occlusion from an Egocentric RGB-D Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b163/12OmNwJybPZ",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a148",
"title": "[POSTER] Rubix: Dynamic Spatial Augmented Reality by Extraction of Plane Regions with a RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a148/12OmNyKJicb",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206776",
"title": "Linear solution to scale and rotation invariant object matching",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206776/12OmNyugz2W",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/07/07299676",
"title": "A Global Hypothesis Verification Framework for 3D Object Recognition in Clutter",
"doi": null,
"abstractUrl": "/journal/tp/2016/07/07299676/13rRUNvPLaW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1999/05/i0433",
"title": "Using Spin Images for Efficient Object Recognition in Cluttered 3D Scenes",
"doi": null,
"abstractUrl": "/journal/tp/1999/05/i0433/13rRUx0gevS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/03/07138633",
"title": "Sticky Projections-A Model-Based Approach to Interactive Shader Lamps Tracking",
"doi": null,
"abstractUrl": "/journal/tg/2016/03/07138633/13rRUxly8XI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800a927",
"title": "Scene Recomposition by Learning-Based ICP",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800a927/1m3ne124gRa",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007295",
"articleId": "13rRUNvgz4n",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007334",
"articleId": "13rRUxYIMV7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgFn",
"name": "ttg201711-08007238s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007238s1.zip",
"extension": "zip",
"size": "11 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYIMV7",
"doi": "10.1109/TVCG.2017.2734599",
"abstract": "We present a temporal 6-DOF tracking method which leverages deep learning to achieve state-of-the-art performance on challenging datasets of real world capture. Our method is both more accurate and more robust to occlusions than the existing best performing approaches while maintaining real-time performance. To assess its efficacy, we evaluate our approach on several challenging RGBD sequences of real objects in a variety of conditions. Notably, we systematically evaluate robustness to occlusions through a series of sequences where the object to be tracked is increasingly occluded. Finally, our approach is purely data-driven and does not require any hand-designed features: robust tracking is automatically learned from data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a temporal 6-DOF tracking method which leverages deep learning to achieve state-of-the-art performance on challenging datasets of real world capture. Our method is both more accurate and more robust to occlusions than the existing best performing approaches while maintaining real-time performance. To assess its efficacy, we evaluate our approach on several challenging RGBD sequences of real objects in a variety of conditions. Notably, we systematically evaluate robustness to occlusions through a series of sequences where the object to be tracked is increasingly occluded. Finally, our approach is purely data-driven and does not require any hand-designed features: robust tracking is automatically learned from data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a temporal 6-DOF tracking method which leverages deep learning to achieve state-of-the-art performance on challenging datasets of real world capture. Our method is both more accurate and more robust to occlusions than the existing best performing approaches while maintaining real-time performance. To assess its efficacy, we evaluate our approach on several challenging RGBD sequences of real objects in a variety of conditions. Notably, we systematically evaluate robustness to occlusions through a series of sequences where the object to be tracked is increasingly occluded. Finally, our approach is purely data-driven and does not require any hand-designed features: robust tracking is automatically learned from data.",
"title": "Deep 6-DOF Tracking",
"normalizedTitle": "Deep 6-DOF Tracking",
"fno": "08007334",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Robustness",
"Cameras",
"Real Time Systems",
"Rendering Computer Graphics",
"Three Dimensional Displays",
"Machine Learning",
"Neural Networks",
"Tracking",
"Deep Learning",
"Augmented Reality"
],
"authors": [
{
"givenName": "Mathieu",
"surname": "Garon",
"fullName": "Mathieu Garon",
"affiliation": "Université Laval",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jean-François",
"surname": "Lalonde",
"fullName": "Jean-François Lalonde",
"affiliation": "Université Laval",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2410-2418",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2004/8484/3/01326466",
"title": "Robust two-camera tracking using homography",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326466/12OmNAXxXdU",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/3",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761044",
"title": "Robust tracking of spatial related components",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761044/12OmNBtl1Ez",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460934",
"title": "A modified KLT multiple objects tracking framework based on global segmentation and adaptive template",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460934/12OmNvonIKs",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2013/11/0/06728915",
"title": "Wide area optical user tracking in unconstrained indoor environments",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2013/06728915/12OmNx2zjvN",
"parentPublication": {
"id": "proceedings/icat/2013/11/0",
"title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671768",
"title": "Simultaneous 3D tracking and reconstruction on a mobile phone",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671768/12OmNxdVgZ2",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402533",
"title": "Kinectrack: Agile 6-DoF tracking using a projected dot pattern",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402533/12OmNxdVh0E",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2017/2818/0/2818a329",
"title": "4-DoF Tracking for Robot Fine Manipulation Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2017/2818a329/12OmNz61cYb",
"parentPublication": {
"id": "proceedings/crv/2017/2818/0",
"title": "2017 14th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a441",
"title": "Robust Real-Time 3D Face Tracking from RGBD Videos under Extreme Pose, Depth, and Expression Variation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a441/12OmNzC5TfM",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/05/06892950",
"title": "Real-Time 3D Tracking and Reconstruction on Mobile Phones",
"doi": null,
"abstractUrl": "/journal/tg/2015/05/06892950/13rRUwInvyC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200k0705",
"title": "DepthTrack: Unveiling the Power of RGBD Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200k0705/1BmK0oMbvk4",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007238",
"articleId": "13rRUwInvl5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007248",
"articleId": "13rRUxASupD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRUN",
"name": "ttg201711-08007334s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007334s1.zip",
"extension": "zip",
"size": "69.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASupD",
"doi": "10.1109/TVCG.2017.2734478",
"abstract": "This paper presents a novel projected pixel localization principle for online geometric registration in dynamic projection mapping applications. We propose applying a time measurement of a laser projector raster-scanning beam using a photosensor to estimate its position while the projector displays meaningful visual information to human observers. Based on this principle, we develop two types of position estimation techniques. One estimates the position of a projected beam when it directly illuminates a photosensor. The other localizes a beam by measuring the reflection from a retro-reflective marker with the photosensor placed in the optical path of the projector. We conduct system evaluations using prototypes to validate this method as well as to confirm the applicability of our principle. In addition, we discuss the technical limitations of the prototypes based on the evaluation results. Finally, we build several dynamic projection mapping applications to demonstrate the feasibility of our principle.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a novel projected pixel localization principle for online geometric registration in dynamic projection mapping applications. We propose applying a time measurement of a laser projector raster-scanning beam using a photosensor to estimate its position while the projector displays meaningful visual information to human observers. Based on this principle, we develop two types of position estimation techniques. One estimates the position of a projected beam when it directly illuminates a photosensor. The other localizes a beam by measuring the reflection from a retro-reflective marker with the photosensor placed in the optical path of the projector. We conduct system evaluations using prototypes to validate this method as well as to confirm the applicability of our principle. In addition, we discuss the technical limitations of the prototypes based on the evaluation results. Finally, we build several dynamic projection mapping applications to demonstrate the feasibility of our principle.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a novel projected pixel localization principle for online geometric registration in dynamic projection mapping applications. We propose applying a time measurement of a laser projector raster-scanning beam using a photosensor to estimate its position while the projector displays meaningful visual information to human observers. Based on this principle, we develop two types of position estimation techniques. One estimates the position of a projected beam when it directly illuminates a photosensor. The other localizes a beam by measuring the reflection from a retro-reflective marker with the photosensor placed in the optical path of the projector. We conduct system evaluations using prototypes to validate this method as well as to confirm the applicability of our principle. In addition, we discuss the technical limitations of the prototypes based on the evaluation results. Finally, we build several dynamic projection mapping applications to demonstrate the feasibility of our principle.",
"title": "Simultaneous Projection and Positioning of Laser Projector Pixels",
"normalizedTitle": "Simultaneous Projection and Positioning of Laser Projector Pixels",
"fno": "08007248",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Measurement By Laser Beam",
"Laser Beams",
"Position Measurement",
"Surface Texture",
"Cameras",
"Calibration",
"Lasers",
"Dynamic Projection Mapping",
"Spatial Augmented Reality",
"Laser Projector",
"Light Pen",
"Geometric Registration"
],
"authors": [
{
"givenName": "Yuki",
"surname": "Kitajima",
"fullName": "Yuki Kitajima",
"affiliation": "Graduate School of Engineering ScienceOsaka University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daisuke",
"surname": "Iwai",
"fullName": "Daisuke Iwai",
"affiliation": "Graduate School of Engineering ScienceOsaka University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kosuke",
"surname": "Sato",
"fullName": "Kosuke Sato",
"affiliation": "Graduate School of Engineering ScienceOsaka University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2419-2429",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/5209c095",
"title": "3D Acquisition of Occluded Surfaces from Scattering in Participating Media",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c095/12OmNAlvHRN",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imccc/2016/1195/0/07774873",
"title": "A New Positioning Method for Indoor Laser Navigation on Under-Determined Condition",
"doi": null,
"abstractUrl": "/proceedings-article/imccc/2016/07774873/12OmNwoxSaK",
"parentPublication": {
"id": "proceedings/imccc/2016/1195/0",
"title": "2016 Sixth International Conference on Instrumentation & Measurement, Computer, Communication and Control (IMCCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isot/2014/6752/0/07119393",
"title": "Recent Development of Using Optical Methods to Measure the Mechanical Properties of Thin Films",
"doi": null,
"abstractUrl": "/proceedings-article/isot/2014/07119393/12OmNx9nGGj",
"parentPublication": {
"id": "proceedings/isot/2014/6752/0",
"title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999928",
"title": "Multi Beam Full Cut Dicing of Thin Si IC Wafers",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999928/12OmNxFsmCC",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a449",
"title": "Projection Center Calibration for a Co-located Projector Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isot/2014/6752/0/07119447",
"title": "Acoustic Research and Control of Piezoelectric Speakers Using a Spatially Modulated TiOPc/Piezo Buzzer Actuator",
"doi": null,
"abstractUrl": "/proceedings-article/isot/2014/07119447/12OmNzCF4UY",
"parentPublication": {
"id": "proceedings/isot/2014/6752/0",
"title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/qe/2022/01/09964063",
"title": "The “Squeeze Laser”",
"doi": null,
"abstractUrl": "/journal/qe/2022/01/09964063/1IAFLDGVVVm",
"parentPublication": {
"id": "trans/qe",
"title": "IEEE Transactions on Quantum Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2019/4689/0/468900a470",
"title": "Design of a Stroboscopic Laser Grating Stripe Projection Device",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2019/468900a470/1h0FgohMNG0",
"parentPublication": {
"id": "proceedings/icmcce/2019/4689/0",
"title": "2019 4th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2021/3892/0/389200a628",
"title": "Study on spherical aberration in the laser optical system",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2021/389200a628/1t2n9aXMNPO",
"parentPublication": {
"id": "proceedings/icmtma/2021/3892/0",
"title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsmt/2020/8668/0/866800a009",
"title": "SLAM Global Positioning Algorithm Based on Laser and Vision Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/iccsmt/2020/866800a009/1u8pDO7YPzG",
"parentPublication": {
"id": "proceedings/iccsmt/2020/8668/0",
"title": "2020 International Conference on Computer Science and Management Technology (ICCSMT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007334",
"articleId": "13rRUxYIMV7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007213",
"articleId": "13rRUxcsYLX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgAl",
"name": "ttg201711-08007248s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007248s1.zip",
"extension": "zip",
"size": "162 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxcsYLX",
"doi": "10.1109/TVCG.2017.2734598",
"abstract": "We present a geometric calibration method to accurately register a galvanoscopic scanning laser projection system (GLP) based on 2D vector input data onto an arbitrarily complex 3D-shaped projection surface. This method allows for accurate merging of 3D vertex data displayed on the laser projector with geometrically calibrated standard rasterization-based video projectors that are registered to the same geometry. Because laser projectors send out a laser light beam via galvanoscopic mirrors, a standard pinhole model calibration procedure that is normally used for pixel raster displays projecting structured light patterns, such as Gray codes, cannot be carried out directly with sufficient accuracy as the rays do not converge into a single point. To overcome the complications of accurately registering the GLP while still enabling a treatment equivalent to a standard pinhole device, an adapted version is applied to enable straightforward content generation. Besides the geometrical calibration, we also present a photometric calibration to unify the color appearance of GLPs and standard video projectors maximizing the advantages of the large color gamut of the GLP and optimizing its color appearance to smoothly fade into the significantly smaller gamut of the video projector. The proposed algorithms were evaluated on a prototypical mixed video projector and GLP projection mapping setup.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a geometric calibration method to accurately register a galvanoscopic scanning laser projection system (GLP) based on 2D vector input data onto an arbitrarily complex 3D-shaped projection surface. This method allows for accurate merging of 3D vertex data displayed on the laser projector with geometrically calibrated standard rasterization-based video projectors that are registered to the same geometry. Because laser projectors send out a laser light beam via galvanoscopic mirrors, a standard pinhole model calibration procedure that is normally used for pixel raster displays projecting structured light patterns, such as Gray codes, cannot be carried out directly with sufficient accuracy as the rays do not converge into a single point. To overcome the complications of accurately registering the GLP while still enabling a treatment equivalent to a standard pinhole device, an adapted version is applied to enable straightforward content generation. Besides the geometrical calibration, we also present a photometric calibration to unify the color appearance of GLPs and standard video projectors maximizing the advantages of the large color gamut of the GLP and optimizing its color appearance to smoothly fade into the significantly smaller gamut of the video projector. The proposed algorithms were evaluated on a prototypical mixed video projector and GLP projection mapping setup.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a geometric calibration method to accurately register a galvanoscopic scanning laser projection system (GLP) based on 2D vector input data onto an arbitrarily complex 3D-shaped projection surface. This method allows for accurate merging of 3D vertex data displayed on the laser projector with geometrically calibrated standard rasterization-based video projectors that are registered to the same geometry. Because laser projectors send out a laser light beam via galvanoscopic mirrors, a standard pinhole model calibration procedure that is normally used for pixel raster displays projecting structured light patterns, such as Gray codes, cannot be carried out directly with sufficient accuracy as the rays do not converge into a single point. To overcome the complications of accurately registering the GLP while still enabling a treatment equivalent to a standard pinhole device, an adapted version is applied to enable straightforward content generation. Besides the geometrical calibration, we also present a photometric calibration to unify the color appearance of GLPs and standard video projectors maximizing the advantages of the large color gamut of the GLP and optimizing its color appearance to smoothly fade into the significantly smaller gamut of the video projector. The proposed algorithms were evaluated on a prototypical mixed video projector and GLP projection mapping setup.",
"title": "Geometric and Photometric Consistency in a Mixed Video and Galvanoscopic Scanning Laser Projection Mapping System",
"normalizedTitle": "Geometric and Photometric Consistency in a Mixed Video and Galvanoscopic Scanning Laser Projection Mapping System",
"fno": "08007213",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Calibration",
"Image Color Analysis",
"Three Dimensional Displays",
"Standards",
"Lasers",
"Cameras",
"Optical Distortion",
"Projector Camera Systems",
"Calibration And Registration Of Sensing Systems",
"Display Hardware",
"Including 3 D",
"Stereoscopic And Multi User Entertainment",
"Broadcast"
],
"authors": [
{
"givenName": "Petar",
"surname": "Pjanic",
"fullName": "Petar Pjanic",
"affiliation": "Disney Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Simon",
"surname": "Willi",
"fullName": "Simon Willi",
"affiliation": "Disney Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anselm",
"surname": "Grundhöfer",
"fullName": "Anselm Grundhöfer",
"affiliation": "Disney Research",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2430-2439",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2016/3641/0/3641a063",
"title": "Practical and Precise Projector-Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a063/12OmNB7cjhR",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2009/3994/0/05204317",
"title": "Geometric video projector auto-calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC",
"parentPublication": {
"id": "proceedings/cvprw/2009/3994/0",
"title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2006/2825/0/04155728",
"title": "Self-Calibration of Multiple Laser Planes for 3D Scene Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2006/04155728/12OmNwI8caf",
"parentPublication": {
"id": "proceedings/3dpvt/2006/2825/0",
"title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a464",
"title": "Simple, Accurate, and Robust Projector-Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2014/4337/0/4337a024",
"title": "Towards Full Omnidirectional Depth Sensing Using Active Vision for Small Unmanned Aerial Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2014/4337a024/12OmNz6iOqk",
"parentPublication": {
"id": "proceedings/crv/2014/4337/0",
"title": "2014 Canadian Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04810996",
"title": "A Distributed Cooperative Framework for Continuous Multi-Projector Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04810996/12OmNzV70vz",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2007/1749/0/04538820",
"title": "Laser Pointer Tracking in Projector-Augmented Architectural Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2007/04538820/12OmNzXnNDt",
"parentPublication": {
"id": "proceedings/ismar/2007/1749/0",
"title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446433",
"title": "A Calibration Method for Large-Scale Projection Based Floor Display System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446433/13bd1gJ1v0M",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/02/v0177",
"title": "Color Nonuniformity in Projection-Based Displays: Analysis and Solutions",
"doi": null,
"abstractUrl": "/journal/tg/2004/02/v0177/13rRUwfI0PW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007248",
"title": "Simultaneous Projection and Positioning of Laser Projector Pixels",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007248/13rRUxASupD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007248",
"articleId": "13rRUxASupD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007312",
"articleId": "13rRUwInvyG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXnFwm",
"name": "ttg201711-08007213s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007213s1.zip",
"extension": "zip",
"size": "8.37 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvyG",
"doi": "10.1109/TVCG.2017.2734428",
"abstract": "Recent publications and art performances demonstrate amazing results using projection mapping. To our knowledge, there exists no multi-projection system that can project onto non-rigid target geometries. This constrains the applicability and quality for live performances with multiple spectators. Given the cost and complexity of current systems, we present a low-cost easy-to-use markerless non-rigid face multi-projection system. It is based on a non-rigid, dense face tracker and a real-time multi-projection solver adapted to imprecise tracking, geometry and calibration. Using this novel system we produce compelling results with only consumer-grade hardware.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent publications and art performances demonstrate amazing results using projection mapping. To our knowledge, there exists no multi-projection system that can project onto non-rigid target geometries. This constrains the applicability and quality for live performances with multiple spectators. Given the cost and complexity of current systems, we present a low-cost easy-to-use markerless non-rigid face multi-projection system. It is based on a non-rigid, dense face tracker and a real-time multi-projection solver adapted to imprecise tracking, geometry and calibration. Using this novel system we produce compelling results with only consumer-grade hardware.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent publications and art performances demonstrate amazing results using projection mapping. To our knowledge, there exists no multi-projection system that can project onto non-rigid target geometries. This constrains the applicability and quality for live performances with multiple spectators. Given the cost and complexity of current systems, we present a low-cost easy-to-use markerless non-rigid face multi-projection system. It is based on a non-rigid, dense face tracker and a real-time multi-projection solver adapted to imprecise tracking, geometry and calibration. Using this novel system we produce compelling results with only consumer-grade hardware.",
"title": "FaceForge: Markerless Non-Rigid Face Multi-Projection Mapping",
"normalizedTitle": "FaceForge: Markerless Non-Rigid Face Multi-Projection Mapping",
"fno": "08007312",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Face",
"Cameras",
"Target Tracking",
"Computational Modeling",
"Geometry",
"Calibration",
"Image Color Analysis",
"Face Projection",
"Mixed Reality",
"Multi Projection Mapping",
"Non Rigid Face Tracking"
],
"authors": [
{
"givenName": "Christian",
"surname": "Siegl",
"fullName": "Christian Siegl",
                    "affiliation": "Computer Graphics Group, University of Erlangen/Nuremberg",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Vanessa",
"surname": "Lange",
"fullName": "Vanessa Lange",
                    "affiliation": "Computer Graphics Group, University of Erlangen/Nuremberg",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marc",
"surname": "Stamminger",
"fullName": "Marc Stamminger",
                    "affiliation": "Computer Graphics Group, University of Erlangen/Nuremberg",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Frank",
"surname": "Bauer",
"fullName": "Frank Bauer",
                    "affiliation": "Computer Graphics Group, University of Erlangen/Nuremberg",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Justus",
"surname": "Thies",
"fullName": "Justus Thies",
                    "affiliation": "Computer Graphics Group, University of Erlangen/Nuremberg",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2440-2446",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223330",
"title": "Robust high-speed tracking against illumination changes for dynamic projection mapping",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223330/12OmNCdk2JE",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dicta/2010/4271/0/4271a241",
"title": "Non-rigid Face Tracking Using Short Track-Life Features",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2010/4271a241/12OmNvHGryH",
"parentPublication": {
"id": "proceedings/dicta/2010/4271/0",
"title": "2010 International Conference on Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/03/07516689",
"title": "Dynamic Projection Mapping onto Deforming Non-Rigid Surface Using Deformable Dot Cluster Marker",
"doi": null,
"abstractUrl": "/journal/tg/2017/03/07516689/13rRUwdIOUR",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/08/07983006",
"title": "FlyCap: Markerless Motion Capture Using Multiple Autonomous Flying Cameras",
"doi": null,
"abstractUrl": "/journal/tg/2018/08/07983006/13rRUxYrbUO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08466021",
"title": "Auto-Calibration for Dynamic Multi-Projection Mapping on Arbitrary Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08466021/14M3DYlzziw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546201",
"title": "Non-rigid Reconstruction with a Single Moving RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546201/17D45WHONoz",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873957",
"title": "Content-Aware Brightness Solving and Error Mitigation in Large-Scale Multi-Projection Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873957/1GjwJ0X1ks0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpbd&is/2019/0466/0/08735493",
"title": "A Multi-Patch Network for Non-Rigid Object Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/hpbd&is/2019/08735493/1aPuQy42GXu",
"parentPublication": {
"id": "proceedings/hpbd&is/2019/0466/0",
"title": "2019 International Conference on High Performance Big Data and Intelligent Systems (HPBD&IS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08821571",
"title": "Animated Stickies: Fast Video Projection Mapping onto a Markerless Plane through a Direct Closed-Loop Alignment",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08821571/1d6xCnoQsU0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900b450",
"title": "Neural Deformation Graphs for Globally-consistent Non-rigid Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900b450/1yeJlVNk3bW",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007213",
"articleId": "13rRUxcsYLX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08008808",
"articleId": "13rRUxOdD8m",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgCt",
"name": "ttg201711-08007312s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007312s1.zip",
"extension": "zip",
"size": "15.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxOdD8m",
"doi": "10.1109/TVCG.2017.2734425",
"abstract": "We present a novel real-time approach for user-guided intrinsic decomposition of static scenes captured by an RGB-D sensor. In the first step, we acquire a three-dimensional representation of the scene using a dense volumetric reconstruction framework. The obtained reconstruction serves as a proxy to densely fuse reflectance estimates and to store user-provided constraints in three-dimensional space. User constraints, in the form of constant shading and reflectance strokes, can be placed directly on the real-world geometry using an intuitive touch-based interaction metaphor, or using interactive mouse strokes. Fusing the decomposition results and constraints in three-dimensional space allows for robust propagation of this information to novel views by re-projection. We leverage this information to improve on the decomposition quality of existing intrinsic video decomposition techniques by further constraining the ill-posed decomposition problem. In addition to improved decomposition quality, we show a variety of live augmented reality applications such as recoloring of objects, relighting of scenes and editing of material appearance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel real-time approach for user-guided intrinsic decomposition of static scenes captured by an RGB-D sensor. In the first step, we acquire a three-dimensional representation of the scene using a dense volumetric reconstruction framework. The obtained reconstruction serves as a proxy to densely fuse reflectance estimates and to store user-provided constraints in three-dimensional space. User constraints, in the form of constant shading and reflectance strokes, can be placed directly on the real-world geometry using an intuitive touch-based interaction metaphor, or using interactive mouse strokes. Fusing the decomposition results and constraints in three-dimensional space allows for robust propagation of this information to novel views by re-projection. We leverage this information to improve on the decomposition quality of existing intrinsic video decomposition techniques by further constraining the ill-posed decomposition problem. In addition to improved decomposition quality, we show a variety of live augmented reality applications such as recoloring of objects, relighting of scenes and editing of material appearance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel real-time approach for user-guided intrinsic decomposition of static scenes captured by an RGB-D sensor. In the first step, we acquire a three-dimensional representation of the scene using a dense volumetric reconstruction framework. The obtained reconstruction serves as a proxy to densely fuse reflectance estimates and to store user-provided constraints in three-dimensional space. User constraints, in the form of constant shading and reflectance strokes, can be placed directly on the real-world geometry using an intuitive touch-based interaction metaphor, or using interactive mouse strokes. Fusing the decomposition results and constraints in three-dimensional space allows for robust propagation of this information to novel views by re-projection. We leverage this information to improve on the decomposition quality of existing intrinsic video decomposition techniques by further constraining the ill-posed decomposition problem. In addition to improved decomposition quality, we show a variety of live augmented reality applications such as recoloring of objects, relighting of scenes and editing of material appearance.",
"title": "Live User-Guided Intrinsic Video for Static Scenes",
"normalizedTitle": "Live User-Guided Intrinsic Video for Static Scenes",
"fno": "08008808",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Geometry",
"Image Colour Analysis",
"Image Reconstruction",
"Interactive Systems",
"Video Signal Processing",
"Dense Volumetric Reconstruction Framework",
"Three Dimensional Space",
"User Constraints",
"Reflectance Strokes",
"Real World Geometry",
"Interactive Mouse Strokes",
"Decomposition Results",
"Intrinsic Video Decomposition Techniques",
"Improved Decomposition Quality",
"Live Augmented Reality Applications",
"Live User",
"Static Scenes",
"Intrinsic Decomposition",
"RGB D Sensor",
"Three Dimensional Representation",
"Densely Fuse Reflectance Estimation",
"Intuitive Touch Based Interaction Metaphor",
"Streaming Media",
"Image Reconstruction",
"Real Time Systems",
"Geometry",
"Three Dimensional Displays",
"Image Color Analysis",
"Cameras",
"Intrinsic Video Decomposition",
"Reflectance Fusion",
"User Guided Shading Refinement"
],
"authors": [
{
"givenName": "Abhimitra",
"surname": "Meka",
"fullName": "Abhimitra Meka",
"affiliation": "Graphics, Vision and Video Group at MPI for Informatics, Saarbrücken, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gereon",
"surname": "Fox",
"fullName": "Gereon Fox",
"affiliation": "Saarbrücken Graduate School of Computer Science, Saarland University, Saarbrücken, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Zollhöfer",
"fullName": "Michael Zollhöfer",
"affiliation": "Graphics, Vision and Video Group at MPI for Informatics, Saarbrücken, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christian",
"surname": "Richardt",
"fullName": "Christian Richardt",
"affiliation": "University of Bath, Bath, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christian",
"surname": "Theobalt",
"fullName": "Christian Theobalt",
"affiliation": "Graphics, Vision and Video Group at MPI for Informatics, Saarbrücken, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2447-2454",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2014/4761/0/06890318",
"title": "L0 co-intrinsic images decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890318/12OmNAoUTnl",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d469",
"title": "Learning Data-Driven Reflectance Priors for Intrinsic Image Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d469/12OmNBoNrqU",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995507",
"title": "Intrinsic images using optimization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995507/12OmNCbU3cE",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a433",
"title": "Intrinsic Decomposition of Image Sequences from Local Temporal Variations",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a433/12OmNzC5Tdg",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/02/ttg2013020210",
"title": "Rich Intrinsic Image Decomposition of Outdoor Scenes from Multiple Views",
"doi": null,
"abstractUrl": "/journal/tg/2013/02/ttg2013020210/13rRUILtJm9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/12/ttp2013122904",
"title": "Intrinsic Image Decomposition Using a Sparse Representation of Reflectance",
"doi": null,
"abstractUrl": "/journal/tp/2013/12/ttp2013122904/13rRUxOdD3R",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000g430",
"title": "Multispectral Image Intrinsic Decomposition via Subspace Constraint",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000g430/17D45XeKgro",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a312",
"title": "HSI-Guided Intrinsic Image Decomposition for Outdoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a312/1G56nWipNPa",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c521",
"title": "Non-Local Intrinsic Decomposition With Near-Infrared Priors",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c521/1hVluc7QzBK",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09199573",
"title": "NIID-Net: Adapting Surface Normal Knowledge for Intrinsic Image Decomposition in Indoor Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09199573/1ncgrpZIBi0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007312",
"articleId": "13rRUwInvyG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007219",
"articleId": "13rRUxC0Sw2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRQF",
"name": "ttg201711-08008808s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08008808s1.zip",
"extension": "zip",
"size": "87.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxC0Sw2",
"doi": "10.1109/TVCG.2017.2734578",
"abstract": "We present a real-time method for rendering novel virtual camera views from given RGB-D (color and depth) data of a different viewpoint. Missing color and depth information due to incomplete input or disocclusions is efficiently inpainted in a temporally consistent way. The inpainting takes the location of strong image gradients into account as likely depth discontinuities. We present our method in the context of a view correction system for mobile devices, and discuss how to obtain a screen-camera calibration and options for acquiring depth input. Our method has use cases in both augmented and virtual reality applications. We demonstrate the speed of our system and the visual quality of its results in multiple experiments in the paper as well as in the supplementary video.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a real-time method for rendering novel virtual camera views from given RGB-D (color and depth) data of a different viewpoint. Missing color and depth information due to incomplete input or disocclusions is efficiently inpainted in a temporally consistent way. The inpainting takes the location of strong image gradients into account as likely depth discontinuities. We present our method in the context of a view correction system for mobile devices, and discuss how to obtain a screen-camera calibration and options for acquiring depth input. Our method has use cases in both augmented and virtual reality applications. We demonstrate the speed of our system and the visual quality of its results in multiple experiments in the paper as well as in the supplementary video.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a real-time method for rendering novel virtual camera views from given RGB-D (color and depth) data of a different viewpoint. Missing color and depth information due to incomplete input or disocclusions is efficiently inpainted in a temporally consistent way. The inpainting takes the location of strong image gradients into account as likely depth discontinuities. We present our method in the context of a view correction system for mobile devices, and discuss how to obtain a screen-camera calibration and options for acquiring depth input. Our method has use cases in both augmented and virtual reality applications. We demonstrate the speed of our system and the visual quality of its results in multiple experiments in the paper as well as in the supplementary video.",
"title": "Real-Time View Correction for Mobile Devices",
"normalizedTitle": "Real-Time View Correction for Mobile Devices",
"fno": "08007219",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Real Time Systems",
"Mobile Handsets",
"Interpolation",
"Pipelines",
"Rendering Computer Graphics",
"Image Color Analysis",
"View Correction",
"Depth Image Based Rendering DIBR",
"Mobile Devices",
"Augmented Reality AR"
],
"authors": [
{
"givenName": "Thomas",
"surname": "Schöps",
"fullName": "Thomas Schöps",
                    "affiliation": "Department of Computer Science, ETH Zürich",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Martin R.",
"surname": "Oswald",
"fullName": "Martin R. Oswald",
                    "affiliation": "Department of Computer Science, ETH Zürich",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pablo",
"surname": "Speciale",
"fullName": "Pablo Speciale",
                    "affiliation": "Department of Computer Science, ETH Zürich",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shuoran",
"surname": "Yang",
"fullName": "Shuoran Yang",
"affiliation": "Microsoft",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marc",
"surname": "Pollefeys",
"fullName": "Marc Pollefeys",
                    "affiliation": "Department of Computer Science, ETH Zürich",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2455-2462",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2014/4308/0/4308a138",
"title": "Dense View Interpolation on Mobile Devices Using Focal Stacks",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a138/12OmNAWH9Je",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscsct/2008/3498/2/3498b206",
"title": "A Color Correction Algorithm of Multi-view Video Based on Depth Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iscsct/2008/3498b206/12OmNBTs7Be",
"parentPublication": {
"id": "proceedings/iscsct/2008/3498/1",
"title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cccm/2008/3290/1/3290a458",
"title": "A Color Error Correction Mode for Digital Camera Based on Polynomial Curve Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cccm/2008/3290a458/12OmNBp52uZ",
"parentPublication": {
"id": "cccm/2008/3290/1",
"title": "Computing, Communication, Control and Management, ISECS International Colloquium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169773",
"title": "A disocclusion filling method using multiple sprites with depth for virtual view synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169773/12OmNs0C9Ua",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460992",
"title": "Faithful Spatio-Temporal disocclusion filling using local optimization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460992/12OmNxuFBpr",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007317",
"title": "Natural Environment Illumination: Coherent Interactive Augmented Reality for Mobile and Non-Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007317/13rRUILc8fg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500d093",
"title": "Revealing Disocclusions in Temporal View Synthesis through Infilling Vector Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500d093/1B13BrNvA40",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200m2558",
"title": "MINE: Towards Continuous Depth MPI with NeRF for Novel View Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200m2558/1BmI1xNvy12",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09928218",
"title": "Metameric Inpainting for Image Warping",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09928218/1HJuJYF342Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/10/09184389",
"title": "InpaintFusion: Incremental RGB-D Inpainting for 3D Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2020/10/09184389/1mLIesC5z0Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08008808",
"articleId": "13rRUxOdD8m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007218",
"articleId": "13rRUxcbnHi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxcbnHi",
"doi": "10.1109/TVCG.2017.2734427",
"abstract": "We propose an occlusion compensation method for optical see-through head-mounted displays (OST-HMDs) equipped with a singlelayer transmissive spatial light modulator (SLM), in particular, a liquid crystal display (LCD). Occlusion is an important depth cue for 3D perception, yet realizing it on OST-HMDs is particularly difficult due to the displays' semitransparent nature. A key component for the occlusion support is the SLM—a device that can selectively interfere with light rays passing through it. For example, an LCD is a transmissive SLM that can block or pass incoming light rays by turning pixels black or transparent. A straightforward solution places an LCD in front of an OST-HMD and drives the LCD to block light rays that could pass through rendered virtual objects at the viewpoint. This simple approach is, however, defective due to the depth mismatch between the LCD panel and the virtual objects, leading to blurred occlusion. This led existing OST-HMDs to employ dedicated hardware such as focus optics and multi-stacked SLMs. Contrary to these viable, yet complex and/or computationally expensive solutions, we return to the single-layer LCD approach for the hardware simplicity while maintaining fine occlusion—we compensate for a degraded occlusion area by overlaying a compensation image. We compute the image based on the HMD parameters and the background scene captured by a scene camera. The evaluation demonstrates that the proposed method reduced the occlusion leak error by 61.4% and the occlusion error by 85.7%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose an occlusion compensation method for optical see-through head-mounted displays (OST-HMDs) equipped with a singlelayer transmissive spatial light modulator (SLM), in particular, a liquid crystal display (LCD). Occlusion is an important depth cue for 3D perception, yet realizing it on OST-HMDs is particularly difficult due to the displays' semitransparent nature. A key component for the occlusion support is the SLM—a device that can selectively interfere with light rays passing through it. For example, an LCD is a transmissive SLM that can block or pass incoming light rays by turning pixels black or transparent. A straightforward solution places an LCD in front of an OST-HMD and drives the LCD to block light rays that could pass through rendered virtual objects at the viewpoint. This simple approach is, however, defective due to the depth mismatch between the LCD panel and the virtual objects, leading to blurred occlusion. This led existing OST-HMDs to employ dedicated hardware such as focus optics and multi-stacked SLMs. Contrary to these viable, yet complex and/or computationally expensive solutions, we return to the single-layer LCD approach for the hardware simplicity while maintaining fine occlusion—we compensate for a degraded occlusion area by overlaying a compensation image. We compute the image based on the HMD parameters and the background scene captured by a scene camera. The evaluation demonstrates that the proposed method reduced the occlusion leak error by 61.4% and the occlusion error by 85.7%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose an occlusion compensation method for optical see-through head-mounted displays (OST-HMDs) equipped with a singlelayer transmissive spatial light modulator (SLM), in particular, a liquid crystal display (LCD). Occlusion is an important depth cue for 3D perception, yet realizing it on OST-HMDs is particularly difficult due to the displays' semitransparent nature. A key component for the occlusion support is the SLM—a device that can selectively interfere with light rays passing through it. For example, an LCD is a transmissive SLM that can block or pass incoming light rays by turning pixels black or transparent. A straightforward solution places an LCD in front of an OST-HMD and drives the LCD to block light rays that could pass through rendered virtual objects at the viewpoint. This simple approach is, however, defective due to the depth mismatch between the LCD panel and the virtual objects, leading to blurred occlusion. This led existing OST-HMDs to employ dedicated hardware such as focus optics and multi-stacked SLMs. Contrary to these viable, yet complex and/or computationally expensive solutions, we return to the single-layer LCD approach for the hardware simplicity while maintaining fine occlusion—we compensate for a degraded occlusion area by overlaying a compensation image. We compute the image based on the HMD parameters and the background scene captured by a scene camera. The evaluation demonstrates that the proposed method reduced the occlusion leak error by 61.4% and the occlusion error by 85.7%.",
"title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator",
"normalizedTitle": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator",
"fno": "08007218",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Liquid Crystal Displays",
"Cameras",
"Optical Imaging",
"Hardware",
"Lenses",
"Glass",
"Image Color Analysis",
"Occlusion Support",
"Optical See Through HMD",
"Occlusion Leak",
"Spatial Light Modulator",
"Depth Cue"
],
"authors": [
{
"givenName": "Yuta",
"surname": "Itoh",
"fullName": "Yuta Itoh",
"affiliation": "Keio University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Takumi",
"surname": "Hamasaki",
"fullName": "Takumi Hamasaki",
"affiliation": "Keio University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maki",
"surname": "Sugimoto",
"fullName": "Maki Sugimoto",
"affiliation": "Keio University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2463-2473",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676153",
"title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676153/18LFbQfp6x2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a800",
"title": "Add-on Occlusion: An External Module for Optical See-through Augmented Reality Displays to Support Mutual Occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a800/1CJeADcapNK",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10050791",
"title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10050791/1L039oS5wDm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998139",
"title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09429918",
"title": "The Impact of Focus and Context Visualization Techniques on Depth Perception in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09429918/1txPs5wi56E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007219",
"articleId": "13rRUxC0Sw2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007317",
"articleId": "13rRUILc8fg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRKF",
"name": "ttg201711-08007218s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007218s1.zip",
"extension": "zip",
"size": "25.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILc8fg",
"doi": "10.1109/TVCG.2017.2734426",
"abstract": "Augmented Reality offers many applications today, especially on mobile devices. Due to the lack of mobile hardware for illumination measurements, photorealistic rendering with consistent appearance of virtual objects is still an area of active research. In this paper, we present a full two-stage pipeline for environment acquisition and augmentation of live camera images using a mobile device with a depth sensor. We show how to directly work on a recorded 3D point cloud of the real environment containing high dynamic range color values. For unknown and automatically changing camera settings, a color compensation method is introduced. Based on this, we show photorealistic augmentations using variants of differential light simulation techniques. The presented methods are tailored for mobile devices and run at interactive frame rates. However, our methods are scalable to trade performance for quality and can produce quality renderings on desktop hardware.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented Reality offers many applications today, especially on mobile devices. Due to the lack of mobile hardware for illumination measurements, photorealistic rendering with consistent appearance of virtual objects is still an area of active research. In this paper, we present a full two-stage pipeline for environment acquisition and augmentation of live camera images using a mobile device with a depth sensor. We show how to directly work on a recorded 3D point cloud of the real environment containing high dynamic range color values. For unknown and automatically changing camera settings, a color compensation method is introduced. Based on this, we show photorealistic augmentations using variants of differential light simulation techniques. The presented methods are tailored for mobile devices and run at interactive frame rates. However, our methods are scalable to trade performance for quality and can produce quality renderings on desktop hardware.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented Reality offers many applications today, especially on mobile devices. Due to the lack of mobile hardware for illumination measurements, photorealistic rendering with consistent appearance of virtual objects is still an area of active research. In this paper, we present a full two-stage pipeline for environment acquisition and augmentation of live camera images using a mobile device with a depth sensor. We show how to directly work on a recorded 3D point cloud of the real environment containing high dynamic range color values. For unknown and automatically changing camera settings, a color compensation method is introduced. Based on this, we show photorealistic augmentations using variants of differential light simulation techniques. The presented methods are tailored for mobile devices and run at interactive frame rates. However, our methods are scalable to trade performance for quality and can produce quality renderings on desktop hardware.",
"title": "Natural Environment Illumination: Coherent Interactive Augmented Reality for Mobile and Non-Mobile Devices",
"normalizedTitle": "Natural Environment Illumination: Coherent Interactive Augmented Reality for Mobile and Non-Mobile Devices",
"fno": "08007317",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Image Color Analysis",
"Three Dimensional Displays",
"Rendering Computer Graphics",
"Lighting",
"Image Reconstruction",
"Estimation",
"Augmented Reality",
"Mixed Reality",
"Differential Rendering",
"Color Compensation",
"Impostors Tracing",
"GPU Importance Sampling",
"Mobile AR",
"Scene Reconstruction",
"Light Estimation",
"Material Estimation",
"Depth Sensing",
"Point Clouds",
"Global Illumination"
],
"authors": [
{
"givenName": "Kai",
"surname": "Rohmer",
"fullName": "Kai Rohmer",
"affiliation": "Graphical Data Processing and Multimedia Group, TU Clausthal, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Johannes",
"surname": "Jendersie",
"fullName": "Johannes Jendersie",
"affiliation": "Graphical Data Processing and Multimedia Group, TU Clausthal, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thorsten",
"surname": "Grosch",
"fullName": "Thorsten Grosch",
"affiliation": "Graphical Data Processing and Multimedia Group, TU Clausthal, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2474-2484",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948406",
"title": "Interactive near-field illumination for photorealistic augmented reality on mobile devices",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948406/12OmNAGNCfe",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2016/0806/0/07550918",
"title": "A mosaic style rendering method based on fuzzy color modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2016/07550918/12OmNrAMF1Y",
"parentPublication": {
"id": "proceedings/icis/2016/0806/0",
"title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571370",
"title": "Preserving Coherent Illumination in Style Transfer Functions for Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571370/12OmNwF0BUx",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223334",
"title": "Image-space illumination for augmented reality in dynamic environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223334/12OmNyFU73E",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802055",
"title": "Global illumination for Augmented Reality on mobile phones",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802055/12OmNyRg4FC",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444836",
"title": "Photorealistic rendering for Augmented Reality: A global illumination and BRDF solution",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444836/12OmNz6iOaA",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/12/07138641",
"title": "Interactive Near-Field Illumination for Photorealistic Augmented Reality with Varying Materials on Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tg/2015/12/07138641/13rRUNvgz4i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699239",
"title": "Reproducing Material Appearance of Real Objects Using Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699239/19F1QemV928",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699176",
"title": "Reproducing Material Appearance of Real Objects Using Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699176/19F1ToU9wNG",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2021/4065/0/406500a017",
"title": "Illumination-aware Digital Image Compositing for Full-length Human Figures",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2021/406500a017/1yBEZLFBWGA",
"parentPublication": {
"id": "proceedings/cw/2021/4065/0",
"title": "2021 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007218",
"articleId": "13rRUxcbnHi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007318",
"articleId": "13rRUxOve9O",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYet1R",
"name": "ttg201711-08007317s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007317s1.zip",
"extension": "zip",
"size": "74.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxOve9O",
"doi": "10.1109/TVCG.2017.2734538",
"abstract": "Predicting specularities in images, given the camera pose and scene geometry from SLAM, forms a challenging and open problem. It is nonetheless essential in several applications such as retexturing. A recent geometric model called JOLIMAS partially answers this problem, under the assumptions that the specularities are elliptical and the scene is planar. JOLIMAS models a moving specularity as the image of a fixed 3D quadric. We propose dual JOLIMAS, a new model which raises the planarity assumption. It uses the fact that specularities remain elliptical on convex surfaces and that every surface can be divided in convex parts. The geometry of dual JOLIMAS then uses a 3D quadric per convex surface part and light source, and predicts the specularities by a means of virtual cameras, allowing it to cope with surface's unflatness. We assessed the efficiency and precision of dual JOLIMAS on multiple synthetic and real videos with various objects and lighting conditions. We give results of a retexturing application. Further results are presented as supplementary video material.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Predicting specularities in images, given the camera pose and scene geometry from SLAM, forms a challenging and open problem. It is nonetheless essential in several applications such as retexturing. A recent geometric model called JOLIMAS partially answers this problem, under the assumptions that the specularities are elliptical and the scene is planar. JOLIMAS models a moving specularity as the image of a fixed 3D quadric. We propose dual JOLIMAS, a new model which raises the planarity assumption. It uses the fact that specularities remain elliptical on convex surfaces and that every surface can be divided in convex parts. The geometry of dual JOLIMAS then uses a 3D quadric per convex surface part and light source, and predicts the specularities by a means of virtual cameras, allowing it to cope with surface's unflatness. We assessed the efficiency and precision of dual JOLIMAS on multiple synthetic and real videos with various objects and lighting conditions. We give results of a retexturing application. Further results are presented as supplementary video material.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Predicting specularities in images, given the camera pose and scene geometry from SLAM, forms a challenging and open problem. It is nonetheless essential in several applications such as retexturing. A recent geometric model called JOLIMAS partially answers this problem, under the assumptions that the specularities are elliptical and the scene is planar. JOLIMAS models a moving specularity as the image of a fixed 3D quadric. We propose dual JOLIMAS, a new model which raises the planarity assumption. It uses the fact that specularities remain elliptical on convex surfaces and that every surface can be divided in convex parts. The geometry of dual JOLIMAS then uses a 3D quadric per convex surface part and light source, and predicts the specularities by a means of virtual cameras, allowing it to cope with surface's unflatness. We assessed the efficiency and precision of dual JOLIMAS on multiple synthetic and real videos with various objects and lighting conditions. We give results of a retexturing application. Further results are presented as supplementary video material.",
"title": "A Multiple-View Geometric Model of Specularities on Non-Planar Shapes with Application to Dynamic Retexturing",
"normalizedTitle": "A Multiple-View Geometric Model of Specularities on Non-Planar Shapes with Application to Dynamic Retexturing",
"fno": "08007318",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Light Sources",
"Cameras",
"Three Dimensional Displays",
"Surface Reconstruction",
"Shape",
"Geometry",
"Image Reconstruction",
"Specularity Prediction",
"Augmented Reality",
"Retexturing",
"Quadric",
"Multiple Light Sources"
],
"authors": [
{
"givenName": "Alexandre",
"surname": "Morgand",
"fullName": "Alexandre Morgand",
"affiliation": "CEA, LIST, Gif-sur-Yvette, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mohamed",
"surname": "Tamaazousti",
"fullName": "Mohamed Tamaazousti",
"affiliation": "CEALIST",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Adrien",
"surname": "Bartoli",
"fullName": "Adrien Bartoli",
"affiliation": "IP",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2485-2493",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402544",
"title": "Real-time surface light-field capture for augmentation of planar specular surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402544/12OmNASILPn",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840c504",
"title": "Multi-view Normal Field Integration for 3D Reconstruction of Mirroring Objects",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840c504/12OmNAoUTgY",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/worv/2013/5646/0/06521920",
"title": "Near surface light source estimation from a single view image",
"doi": null,
"abstractUrl": "/proceedings-article/worv/2013/06521920/12OmNBVrjoU",
"parentPublication": {
"id": "proceedings/worv/2013/5646/0",
"title": "2013 IEEE Workshop on Robot Vision (WORV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2016/3641/0/3641a044",
"title": "An Empirical Model for Specularity Prediction with Application to Dynamic Retexturing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a044/12OmNCd2rxc",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671772",
"title": "Delta Light Propagation Volumes for mixed reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671772/12OmNwkhTdN",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720a175",
"title": "Surface Recovery: Fusion of Image and Point Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a175/12OmNxE2mUe",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fcst/2010/7779/0/05575931",
"title": "User-Controlled Geometric Feature Preserving Simplification",
"doi": null,
"abstractUrl": "/proceedings-article/fcst/2010/05575931/12OmNyNQSQn",
"parentPublication": {
"id": "proceedings/fcst/2010/7779/0",
"title": "2010 Fifth International Conference on Frontier of Computer Science and Technology (FCST 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccv/1988/0883/0/00590016",
"title": "Geometry From Specularities",
"doi": null,
"abstractUrl": "/proceedings-article/ccv/1988/00590016/12OmNylsZA6",
"parentPublication": {
"id": "proceedings/ccv/1988/0883/0",
"title": "1988 Second International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/05/07869421",
"title": "A Geometric Model for Specularity Prediction on Planar Surfaces with Multiple Light Sources",
"doi": null,
"abstractUrl": "/journal/tg/2018/05/07869421/13rRUwdIOUT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300b052",
"title": "A Differential Volumetric Approach to Multi-View Photometric Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300b052/1hVlAZv5zfG",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007317",
"articleId": "13rRUILc8fg",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRQj",
"name": "ttg201711-08007318s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007318s1.zip",
"extension": "zip",
"size": "58.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUypp57B",
"doi": "10.1109/TVCG.2012.59",
"abstract": "Presents the title page of the issue.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents the title page of the issue.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents the title page of the issue.",
"title": "[Title page]",
"normalizedTitle": "[Title page]",
"fno": "ttg201204000i",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "i-ii",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "ttg2012040iii",
"articleId": "13rRUwfZC0e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwfZC0e",
"doi": "10.1109/TVCG.2012.57",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Table of Contents",
"normalizedTitle": "Table of Contents",
"fno": "ttg2012040iii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "iii-iv",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg201204000i",
"articleId": "13rRUypp57B",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201204000v",
"articleId": "13rRUwbs2b0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwbs2b0",
"doi": "10.1109/TVCG.2012.50",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Message from the Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief",
"fno": "ttg201204000v",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Ming C.",
"surname": "Lin",
"fullName": "Ming C. Lin",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "v-v",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2016/01/07307929",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07307929/13rRUIIVlkl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2016/01/07423841",
"title": "Message From the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2016/01/07423841/13rRUwgQpvG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg20111200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg20111200ix/13rRUwjoNx0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064831",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064831/13rRUxBa5no",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2017/01/07870827",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2017/01/07870827/13rRUxE04mk",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08165928",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08165928/13rRUxly8T3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07572705",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07572705/13rRUyp7tX0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg201006000x",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg201006000x/13rRUytF41v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040iii",
"articleId": "13rRUwfZC0e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg20120400vi",
"articleId": "13rRUxly9dS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxly9dS",
"doi": "10.1109/TVCG.2012.51",
"abstract": "The articles in this special issue contain the full paper proceedings of the IEEE Virtual Reality Conference 2012 (IEEE VR 2012), held March 4-8, 2012 in Orange County, California.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The articles in this special issue contain the full paper proceedings of the IEEE Virtual Reality Conference 2012 (IEEE VR 2012), held March 4-8, 2012 in Orange County, California.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The articles in this special issue contain the full paper proceedings of the IEEE Virtual Reality Conference 2012 (IEEE VR 2012), held March 4-8, 2012 in Orange County, California.",
"title": "Message from the Paper Chairs and Guest Editors",
"normalizedTitle": "Message from the Paper Chairs and Guest Editors",
"fno": "ttg20120400vi",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Virtual Reality"
],
"authors": [
{
"givenName": "Sabine",
"surname": "Coquillart",
"fullName": "Sabine Coquillart",
"affiliation": "INRIA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steven",
"surname": "Feiner",
"fullName": "Steven Feiner",
"affiliation": "Columbia University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kiyoshi",
"surname": "Kiyokawa",
"fullName": "Kiyoshi Kiyokawa",
"affiliation": "Osaka University, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "vi",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/04/ttg2013040000vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040000vi/13rRUILtJma",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/06/ttg2013060898",
"title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium 2012",
"doi": null,
"abstractUrl": "/journal/tg/2013/06/ttg2013060898/13rRUNvgziD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064835",
"title": "Message from the VR Program Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064835/13rRUwh80uB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935059",
"title": "Message from the VIS Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935059/13rRUxBa564",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg201212000x",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg201212000x/13rRUxYIN49",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09405530",
"title": "Message from the Program Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09405530/1sP1eDRuGMU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201204000v",
"articleId": "13rRUwbs2b0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040vii",
"articleId": "13rRUNvgz9K",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvgz9K",
"doi": "10.1109/TVCG.2012.39",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Committees",
"normalizedTitle": "Committees",
"fno": "ttg2012040vii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "vii-ix",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg20120400vi",
"articleId": "13rRUxly9dS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201204000x",
"articleId": "13rRUNvgz9J",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvgz9J",
"doi": "10.1109/TVCG.2012.54",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Paper Reviewers",
"normalizedTitle": "Paper Reviewers",
"fno": "ttg201204000x",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "x-x",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040vii",
"articleId": "13rRUNvgz9K",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg20120400xi",
"articleId": "13rRUwkfAZf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwkfAZf",
"doi": "10.1109/TVCG.2012.49",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Keynote Speaker: Taking the \"Virtual\" Out of Virtual Reality",
"normalizedTitle": "Keynote Speaker: Taking the \"Virtual\" Out of Virtual Reality",
"fno": "ttg20120400xi",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Carlo H.",
"surname": "Séquin",
"fullName": "Carlo H. Séquin",
"affiliation": "University of California, Berkeley",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "xi-xi",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/colcom/2007/1318/0/04553882",
"title": "Keynote speaker",
"doi": null,
"abstractUrl": "/proceedings-article/colcom/2007/04553882/12OmNASILUZ",
"parentPublication": {
"id": "proceedings/colcom/2007/1318/0",
"title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2012/1247/0/06180863",
"title": "Keynote presentation: Taking the \"virtual\" out of virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2012/06180863/12OmNB9t6vQ",
"parentPublication": {
"id": "proceedings/vr/2012/1247/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mse/2011/0548/0/05937072",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/mse/2011/05937072/12OmNBQkx3o",
"parentPublication": {
"id": "proceedings/mse/2011/0548/0",
"title": "2011 IEEE International Conference on Microelectronic Systems Education (MSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hase/2001/1275/0/12750004",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/hase/2001/12750004/12OmNCd2rF1",
"parentPublication": {
"id": "proceedings/hase/2001/1275/0",
"title": "Proceedings Sixth IEEE International Symposium on High Assurance Systems Engineering. Special Topic: Impact of Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156348",
"title": "Keynote speaker",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156348/12OmNvlPkAl",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/taicpart-mutation/2007/2984/0/2984xxii",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/taicpart-mutation/2007/2984xxii/12OmNyOHFZr",
"parentPublication": {
"id": "proceedings/taicpart-mutation/2007/2984/0",
"title": "Testing: Academic and Industrial Conference Practice and Research Techniques - MUTATION",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/colcom/2007/1318/0/04553881",
"title": "Keynote speaker",
"doi": null,
"abstractUrl": "/proceedings-article/colcom/2007/04553881/12OmNzuZUqn",
"parentPublication": {
"id": "proceedings/colcom/2007/1318/0",
"title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500z025",
"title": "Keynote Speaker: Digital Humans in Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500z025/1MNgtJP55y8",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2022/9744/0/974400z036",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2022/974400z036/1MrFQvu4EFi",
"parentPublication": {
"id": "proceedings/ictai/2022/9744/0",
"title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201204000x",
"articleId": "13rRUNvgz9J",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040xii",
"articleId": "13rRUIM2VBE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUIM2VBE",
"doi": "10.1109/TVCG.2012.38",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Capstone Speaker",
"normalizedTitle": "Capstone Speaker",
"fno": "ttg2012040xii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "James J.",
"surname": "Blascovich",
"fullName": "James J. Blascovich",
"affiliation": "Professor, Psychology University of California, Santa Barbara",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "xii-xii",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2007/1179/0/04269968",
"title": "Capstone Talk",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04269968/12OmNAXxWVj",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2015/9783/0/07347623",
"title": "VIS capstone address",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2015/07347623/12OmNBSSVi0",
"parentPublication": {
"id": "proceedings/vast/2015/9783/0",
"title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/1998/9093/0/9093xii",
"title": "Capstone Address",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/1998/9093xii/12OmNBhZ4rc",
"parentPublication": {
"id": "proceedings/ieee-infovis/1998/9093/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2016/5661/0/07883505",
"title": "VIS capstone address",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2016/07883505/12OmNxFJXuy",
"parentPublication": {
"id": "proceedings/vast/2016/5661/0",
"title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504680",
"title": "Capstone speaker: Agents? Seriously",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504680/12OmNzV70mm",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg20100600xxv",
"title": "VisWeek Capstone Address",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg20100600xxv/13rRUx0gefi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg201212xxii",
"title": "VisWeek 2012 Capstone Speaker",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg201212xxii/13rRUyY294C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg201312xxviii",
"title": "VIS 2013 Capstone Speaker: Information Visualization: Challenges and Opportunities",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg201312xxviii/13rRUygBwhJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg201112xxiv",
"title": "VisWeek Capstone Address",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg201112xxiv/13rRUynHuj6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scivis/2018/6882/0/08823613",
"title": "SciVis 2018 Capstone Address",
"doi": null,
"abstractUrl": "/proceedings-article/scivis/2018/08823613/1d5kwNLsKhW",
"parentPublication": {
"id": "proceedings/scivis/2018/6882/0",
"title": "2018 IEEE Scientific Visualization Conference (SciVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg20120400xi",
"articleId": "13rRUwkfAZf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201204xiii",
"articleId": "13rRUy2YLYt",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUy2YLYt",
"doi": "10.1109/TVCG.2012.37",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Banquet Speaker: What's Next?: The Third Wave in Computer Graphics and Interactive Techniques",
"normalizedTitle": "Banquet Speaker: What's Next?: The Third Wave in Computer Graphics and Interactive Techniques",
"fno": "ttg201204xiii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "David J.",
"surname": "Kasik",
"fullName": "David J. Kasik",
"affiliation": "Boeing Senior Technical Fellow",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "xiii-xiv",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2012/1247/0/06180864",
"title": "Banquet presentation: What's next?: The third wave in computer graphics and interactive techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2012/06180864/12OmNCf1DlJ",
"parentPublication": {
"id": "proceedings/vr/2012/1247/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uksim/2016/0888/0/07796676",
"title": "Keynote Speaker I: Computing for Big Science: Gravitational Wave Detection",
"doi": null,
"abstractUrl": "/proceedings-article/uksim/2016/07796676/12OmNyUWR0x",
"parentPublication": {
"id": "proceedings/uksim/2016/0888/0",
"title": "2016 UKSim-AMSS 18th International Conference on Computer Modelling and Simulation (UKSim)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ftcs/1999/0213/0/0213xvii",
"title": "Banquet Speaker: Harrison Hagan Schmitt",
"doi": null,
"abstractUrl": "/proceedings-article/ftcs/1999/0213xvii/12OmNz2kqef",
"parentPublication": {
"id": "proceedings/ftcs/1999/0213/0",
"title": "Fault-Tolerant Computing, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlsid/2010/3928/0/3928z037",
"title": "Banquet Talk",
"doi": null,
"abstractUrl": "/proceedings-article/vlsid/2010/3928z037/12OmNzFdt6m",
"parentPublication": {
"id": "proceedings/vlsid/2010/3928/0",
"title": "VLSI Design, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2005/02/f2014",
"title": "Web Services: What's Real and What's Not?",
"doi": null,
"abstractUrl": "/magazine/it/2005/02/f2014/13rRUEgarxx",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ic/2003/01/w1013",
"title": "Getting on the Third Wave",
"doi": null,
"abstractUrl": "/magazine/ic/2003/01/w1013/13rRUEgs2HF",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/1996/05/s5004",
"title": "What's Practical",
"doi": null,
"abstractUrl": "/magazine/so/1996/05/s5004/13rRUwjGoE5",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2011/04/mcg2011040089",
"title": "The Third Wave in Computer Graphics and Interactive Techniques",
"doi": null,
"abstractUrl": "/magazine/cg/2011/04/mcg2011040089/13rRUxZRbrc",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2010/03/mmu2010030008",
"title": "Achieving Ubiquity: The New Third Wave",
"doi": null,
"abstractUrl": "/magazine/mu/2010/03/mmu2010030008/13rRUxjQyrS",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2000/03/u3005",
"title": "What's New",
"doi": null,
"abstractUrl": "/magazine/mu/2000/03/u3005/13rRUy3xYcq",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040xii",
"articleId": "13rRUIM2VBE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040529",
"articleId": "13rRUxjQypb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxjQypb",
"doi": "10.1109/TVCG.2012.52",
"abstract": "Distributed virtual environments (DVEs) are becoming very popular in recent years, due to the rapid growing of applications, such as massive multiplayer online games (MMOGs). As the number of concurrent users increases, scalability becomes one of the major challenges in designing an interactive DVE system. One solution to address this scalability problem is to adopt a multi-server architecture. While some methods focus on the quality of partitioning the load among the servers, others focus on the efficiency of the partitioning process itself. However, all these methods neglect the effect of network delay among the servers on the accuracy of the load balancing solutions. As we show in this paper, the change in the load of the servers due to network delay would affect the performance of the load balancing algorithm. In this work, we conduct a formal analysis of this problem and discuss two efficient delay adjustment schemes to address the problem. Our experimental results show that our proposed schemes can significantly improve the performance of the load balancing algorithm with neglectable computation overhead.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Distributed virtual environments (DVEs) are becoming very popular in recent years, due to the rapid growing of applications, such as massive multiplayer online games (MMOGs). As the number of concurrent users increases, scalability becomes one of the major challenges in designing an interactive DVE system. One solution to address this scalability problem is to adopt a multi-server architecture. While some methods focus on the quality of partitioning the load among the servers, others focus on the efficiency of the partitioning process itself. However, all these methods neglect the effect of network delay among the servers on the accuracy of the load balancing solutions. As we show in this paper, the change in the load of the servers due to network delay would affect the performance of the load balancing algorithm. In this work, we conduct a formal analysis of this problem and discuss two efficient delay adjustment schemes to address the problem. Our experimental results show that our proposed schemes can significantly improve the performance of the load balancing algorithm with neglectable computation overhead.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Distributed virtual environments (DVEs) are becoming very popular in recent years, due to the rapid growing of applications, such as massive multiplayer online games (MMOGs). As the number of concurrent users increases, scalability becomes one of the major challenges in designing an interactive DVE system. One solution to address this scalability problem is to adopt a multi-server architecture. While some methods focus on the quality of partitioning the load among the servers, others focus on the efficiency of the partitioning process itself. However, all these methods neglect the effect of network delay among the servers on the accuracy of the load balancing solutions. As we show in this paper, the change in the load of the servers due to network delay would affect the performance of the load balancing algorithm. In this work, we conduct a formal analysis of this problem and discuss two efficient delay adjustment schemes to address the problem. Our experimental results show that our proposed schemes can significantly improve the performance of the load balancing algorithm with neglectable computation overhead.",
"title": "On Delay Adjustment for Dynamic Load Balancing in Distributed Virtual Environments",
"normalizedTitle": "On Delay Adjustment for Dynamic Load Balancing in Distributed Virtual Environments",
"fno": "ttg2012040529",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Formal Specification",
"Formal Verification",
"Resource Allocation",
"Dynamic Load Balancing Algorithm",
"Distributed Virtual Environment",
"Massive Multiplayer Online Game",
"Interactive DVE System",
"Multiserver Architecture",
"Load Partitioning",
"Network Delay Effect",
"Server Load",
"Formal Analysis",
"Delay Adjustment Schemes",
"Servers",
"Silicon",
"Load Management",
"Delay",
"Heating",
"Heuristic Algorithms",
"Load Modeling",
"Distributed Virtual Environments",
"Multi Server Architecture",
"Dynamic Load Balancing",
"Delay Adjustment"
],
"authors": [
{
"givenName": null,
"surname": "Yunhua Deng",
"fullName": "Yunhua Deng",
"affiliation": "Dept. of Comput. Sci., City Univ. of Hong Kong, Kowloon, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "R. W. H.",
"surname": "Lau",
"fullName": "R. W. H. Lau",
"affiliation": "Dept. of Comput. Sci., City Univ. of Hong Kong, Kowloon, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "529-537",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccsn/2009/3522/0/3522a309",
"title": "Research on the Stability of Load Balancing Algorithm for Scalable Parallel Computing",
"doi": null,
"abstractUrl": "/proceedings-article/iccsn/2009/3522a309/12OmNAoDi2Q",
"parentPublication": {
"id": "proceedings/iccsn/2009/3522/0",
"title": "Communication Software and Networks, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ladc/2011/4320/0/4320a156",
"title": "Load Balancing for Internet Distributed Services Using Limited Redirection Rates",
"doi": null,
"abstractUrl": "/proceedings-article/ladc/2011/4320a156/12OmNAtK4r6",
"parentPublication": {
"id": "proceedings/ladc/2011/4320/0",
"title": "Dependable Computing, Latin-American Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/edoc/2004/2214/0/22140135",
"title": "Evaluating the Performance of Middleware Load Balancing Strategies",
"doi": null,
"abstractUrl": "/proceedings-article/edoc/2004/22140135/12OmNB1eJBR",
"parentPublication": {
"id": "proceedings/edoc/2004/2214/0",
"title": "Proceedings. Eighth IEEE International Enterprise Distributed Object Computing Conference, 2004. EDOC 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2009/3605/1/3605a068",
"title": "Adaptive Control of Stable Load Balancing Algorithm for Parallel Cluster Computing",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2009/3605a068/12OmNC3FG4o",
"parentPublication": {
"id": "cso/2009/3605/1",
"title": "2009 International Joint Conference on Computational Sciences and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nswctc/2009/3610/1/3610a253",
"title": "The Effect of Execution-Time/Delay Ratio on Stability of Load Balancing Control",
"doi": null,
"abstractUrl": "/proceedings-article/nswctc/2009/3610a253/12OmNs0TKMc",
"parentPublication": {
"id": "proceedings/nswctc/2009/3610/1",
"title": "Networks Security, Wireless Communications and Trusted Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/focs/1995/7183/0/71830383",
"title": "Load balancing in the L/sub p/ norm",
"doi": null,
"abstractUrl": "/proceedings-article/focs/1995/71830383/12OmNwGZNPL",
"parentPublication": {
"id": "proceedings/focs/1995/7183/0",
"title": "Proceedings of IEEE 36th Annual Foundations of Computer Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-icess/2011/4538/0/4538a237",
"title": "Delay-Tolerant Dynamic Load Balancing",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-icess/2011/4538a237/12OmNxdm4sw",
"parentPublication": {
"id": "proceedings/hpcc-icess/2011/4538/0",
"title": "High Performance Computing and Communication & IEEE International Conference on Embedded Software and Systems, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dbta/2009/3604/0/3604a348",
"title": "Load Balancing System Applied Research",
"doi": null,
"abstractUrl": "/proceedings-article/dbta/2009/3604a348/12OmNyRg4yv",
"parentPublication": {
"id": "proceedings/dbta/2009/3604/0",
"title": "2009 First International Workshop on Database Technology and Applications, DBTA",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscsct/2008/3498/1/3498a404",
"title": "A Dynamic Load Balancing Method Based on Stability Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/iscsct/2008/3498a404/12OmNz5s0Mc",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2005/04/l0349",
"title": "Efficient, Proximity-Aware Load Balancing for DHT-Based P2P Systems",
"doi": null,
"abstractUrl": "/journal/td/2005/04/l0349/13rRUwj7coN",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201204xiii",
"articleId": "13rRUy2YLYt",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040538",
"articleId": "13rRUwgQpDs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwgQpDs",
"doi": "10.1109/TVCG.2012.55",
"abstract": "Walking is the most natural form of locomotion for humans, and real walking interfaces have demonstrated their benefits for several navigation tasks. With recently proposed redirection techniques it becomes possible to overcome space limitations as imposed by tracking sensors or laboratory setups, and, theoretically, it is now possible to walk through arbitrarily large virtual environments. However, walking as sole locomotion technique has drawbacks, in particular, for long distances, such that even in the real world we tend to support walking with passive or active transportation for longer-distance travel. In this article we show that concepts from the field of redirected walking can be applied to movements with transportation devices. We conducted psychophysical experiments to determine perceptual detection thresholds for redirected driving, and set these in relation to results from redirected walking. We show that redirected walking-and-driving approaches can easily be realized in immersive virtual reality laboratories, e. g., with electric wheelchairs, and show that such systems can combine advantages of real walking in confined spaces with benefits of using vehiclebased self-motion for longer-distance travel.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Walking is the most natural form of locomotion for humans, and real walking interfaces have demonstrated their benefits for several navigation tasks. With recently proposed redirection techniques it becomes possible to overcome space limitations as imposed by tracking sensors or laboratory setups, and, theoretically, it is now possible to walk through arbitrarily large virtual environments. However, walking as sole locomotion technique has drawbacks, in particular, for long distances, such that even in the real world we tend to support walking with passive or active transportation for longer-distance travel. In this article we show that concepts from the field of redirected walking can be applied to movements with transportation devices. We conducted psychophysical experiments to determine perceptual detection thresholds for redirected driving, and set these in relation to results from redirected walking. We show that redirected walking-and-driving approaches can easily be realized in immersive virtual reality laboratories, e. g., with electric wheelchairs, and show that such systems can combine advantages of real walking in confined spaces with benefits of using vehiclebased self-motion for longer-distance travel.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Walking is the most natural form of locomotion for humans, and real walking interfaces have demonstrated their benefits for several navigation tasks. With recently proposed redirection techniques it becomes possible to overcome space limitations as imposed by tracking sensors or laboratory setups, and, theoretically, it is now possible to walk through arbitrarily large virtual environments. However, walking as sole locomotion technique has drawbacks, in particular, for long distances, such that even in the real world we tend to support walking with passive or active transportation for longer-distance travel. In this article we show that concepts from the field of redirected walking can be applied to movements with transportation devices. We conducted psychophysical experiments to determine perceptual detection thresholds for redirected driving, and set these in relation to results from redirected walking. We show that redirected walking-and-driving approaches can easily be realized in immersive virtual reality laboratories, e. g., with electric wheelchairs, and show that such systems can combine advantages of real walking in confined spaces with benefits of using vehiclebased self-motion for longer-distance travel.",
"title": "Redirecting Walking and Driving for Natural Navigation in Immersive Virtual Environments",
"normalizedTitle": "Redirecting Walking and Driving for Natural Navigation in Immersive Virtual Environments",
"fno": "ttg2012040538",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Interactive Devices",
"User Interfaces",
"Navigation Task",
"Natural Navigation",
"Immersive Virtual Environment",
"Walking Locomotion",
"Driving Locomotion",
"Walking Interface",
"Redirection Technique",
"Passive Transportation",
"Active Transportation",
"Redirected Walking",
"Transportation Device",
"Perceptual Detection Threshold",
"Redirected Driving",
"Electric Wheelchair",
"Vehicle Based Self Motion",
"Longer Distance Travel",
"Legged Locomotion",
"Wheelchairs",
"Laboratories",
"Visualization",
"Navigation",
"Vehicles",
"Space Exploration",
"Motion Perception",
"Redirected Walking",
"Redirected Driving",
"Natural Locomotion",
"Self 8211"
],
"authors": [
{
"givenName": "G.",
"surname": "Bruder",
"fullName": "G. Bruder",
"affiliation": "Dept. of Comput. Sci., Univ. of Wurzburg, Wurzburg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "V.",
"surname": "Interrante",
"fullName": "V. Interrante",
"affiliation": "Dept. of Comput. Sci. & Eng., Univ. of Minnesota, Minneapolis, MN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "L.",
"surname": "Phillips",
"fullName": "L. Phillips",
"affiliation": "Dept. of Comput. Sci. & Eng., Univ. of Minnesota, Minneapolis, MN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "F.",
"surname": "Steinicke",
"fullName": "F. Steinicke",
"affiliation": "Dept. of Comput. Sci., Univ. of Wurzburg, Wurzburg, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "538-545",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/1999/0093/0/00930286",
"title": "Walking About Virtual Environments on an Infinite Floor",
"doi": null,
"abstractUrl": "/proceedings-article/vr/1999/00930286/12OmNwDACcO",
"parentPublication": {
"id": "proceedings/vr/1999/0093/0",
"title": "Proceedings of Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759437",
"title": "An evaluation of navigational ability comparing Redirected Free Exploration with Distractors to Walking-in-Place and joystick locomotio interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759437/12OmNx8OuyK",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549395",
"title": "Flexible and general redirected walking for head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549395/12OmNxFJXN3",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759456",
"title": "Shadow walking: An unencumbered locomotion technique for systems with under-floor projection",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759456/12OmNyNQSC1",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446579",
"title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010017",
"title": "Estimation of Detection Thresholds for Redirected Walking Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010017/13rRUxZ0o1t",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/07/06109251",
"title": "The Design and Evaluation of a Large-Scale Real-Walking Locomotion Interface",
"doi": null,
"abstractUrl": "/journal/tg/2012/07/06109251/13rRUygT7mV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/07/06200791",
"title": "Velocity-Dependent Dynamic Curvature Gain for Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2012/07/06200791/13rRUyuNswW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09715721",
"title": "Validating Simulation-Based Evaluation of Redirected Walking Systems",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09715721/1B4hxt06P9m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798286",
"title": "Evaluating the Effectiveness of Redirected Walking with Auditory Distractors for Navigation in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798286/1cJ0PIoIPV6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040529",
"articleId": "13rRUxjQypb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040546",
"articleId": "13rRUxOve9G",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXnFsl",
"name": "ttg2012040538s1.mov",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2012040538s1.mov",
"extension": "mov",
"size": "15.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxOve9G",
"doi": "10.1109/TVCG.2012.60",
"abstract": "Immersive spaces such as 4-sided displays with stereo viewing and high-quality tracking provide a very engaging and realistic virtual experience. However, walking is inherently limited by the restricted physical space, both due to the screens (limited translation) and the missing back screen (limited rotation). In this paper, we propose three novel locomotion techniques that have three concurrent goals: keep the user safe from reaching the translational and rotational boundaries; increase the amount of real walking and finally, provide a more enjoyable and ecological interaction paradigm compared to traditional controller-based approaches. We notably introduce the \"Virtual Companion\", which uses a small bird to guide the user through VEs larger than the physical space. We evaluate the three new techniques through a user study with travel-to-target and path following tasks. The study provides insight into the relative strengths of each new technique for the three aforementioned goals. Specifically, if speed and accuracy are paramount, traditional controller interfaces augmented with our novel warning techniques may be more appropriate; if physical walking is more important, two of our paradigms (extended Magic Barrier Tape and Constrained Wand) should be preferred; last, fun and ecological criteria would favor the Virtual Companion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Immersive spaces such as 4-sided displays with stereo viewing and high-quality tracking provide a very engaging and realistic virtual experience. However, walking is inherently limited by the restricted physical space, both due to the screens (limited translation) and the missing back screen (limited rotation). In this paper, we propose three novel locomotion techniques that have three concurrent goals: keep the user safe from reaching the translational and rotational boundaries; increase the amount of real walking and finally, provide a more enjoyable and ecological interaction paradigm compared to traditional controller-based approaches. We notably introduce the \"Virtual Companion\", which uses a small bird to guide the user through VEs larger than the physical space. We evaluate the three new techniques through a user study with travel-to-target and path following tasks. The study provides insight into the relative strengths of each new technique for the three aforementioned goals. Specifically, if speed and accuracy are paramount, traditional controller interfaces augmented with our novel warning techniques may be more appropriate; if physical walking is more important, two of our paradigms (extended Magic Barrier Tape and Constrained Wand) should be preferred; last, fun and ecological criteria would favor the Virtual Companion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Immersive spaces such as 4-sided displays with stereo viewing and high-quality tracking provide a very engaging and realistic virtual experience. However, walking is inherently limited by the restricted physical space, both due to the screens (limited translation) and the missing back screen (limited rotation). In this paper, we propose three novel locomotion techniques that have three concurrent goals: keep the user safe from reaching the translational and rotational boundaries; increase the amount of real walking and finally, provide a more enjoyable and ecological interaction paradigm compared to traditional controller-based approaches. We notably introduce the \"Virtual Companion\", which uses a small bird to guide the user through VEs larger than the physical space. We evaluate the three new techniques through a user study with travel-to-target and path following tasks. The study provides insight into the relative strengths of each new technique for the three aforementioned goals. Specifically, if speed and accuracy are paramount, traditional controller interfaces augmented with our novel warning techniques may be more appropriate; if physical walking is more important, two of our paradigms (extended Magic Barrier Tape and Constrained Wand) should be preferred; last, fun and ecological criteria would favor the Virtual Companion.",
"title": "Walking in a Cube: Novel Metaphors for Safely Navigating Large Virtual Environments in Restricted Real Workspaces",
"normalizedTitle": "Walking in a Cube: Novel Metaphors for Safely Navigating Large Virtual Environments in Restricted Real Workspaces",
"fno": "ttg2012040546",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Computer Displays",
"User Interfaces",
"Constrained Wand Paradigm",
"Virtual Environment Navigation",
"Immersive Space",
"4 Sided Display",
"Stereo Viewing",
"High Quality Tracking",
"Virtual Experience",
"Limited Translation",
"Limited Rotation",
"Locomotion Technique",
"Translational Boundary",
"Rotational Boundary",
"Ecological Interaction Paradigm",
"Controller Based Approach",
"Virtual Companion",
"User Study",
"Travel To Target Task",
"Path Following Task",
"Warning Technique",
"Controller Interface",
"Magic Barrier Tape Paradigm",
"Legged Locomotion",
"Navigation",
"Birds",
"Safety",
"Virtual Environments",
"Visualization",
"Face",
"Restricted Workspaces",
"Virtual Reality",
"Locomotion Techniques",
"Walking"
],
"authors": [
{
"givenName": "G.",
"surname": "Cirio",
"fullName": "G. Cirio",
"affiliation": "INRIA Rennes, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "P.",
"surname": "Vangorp",
"fullName": "P. Vangorp",
"affiliation": "REVES/INRIA, Sophia-Antipolis, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "E.",
"surname": "Chapoulie",
"fullName": "E. Chapoulie",
"affiliation": "REVES/INRIA, Sophia-Antipolis, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Marchal",
"fullName": "M. Marchal",
"affiliation": "INRIA Rennes, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Lecuyer",
"fullName": "A. Lecuyer",
"affiliation": "INRIA Rennes, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "G.",
"surname": "Drettakis",
"fullName": "G. Drettakis",
"affiliation": "REVES/INRIA, Sophia-Antipolis, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "546-554",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892348",
"title": "Steering locomotion by vestibular perturbation in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448288",
"title": "Experiencing an Invisible World War I Battlefield Through Narrative-Driven Redirected Walking in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448288/13bd1fZBGdu",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/01/mcg2013010006",
"title": "Using Perceptual Illusions for Redirected Walking",
"doi": null,
"abstractUrl": "/magazine/cg/2013/01/mcg2013010006/13rRUB6SpRZ",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010017",
"title": "Estimation of Detection Thresholds for Redirected Walking Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010017/13rRUxZ0o1t",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/07/06200791",
"title": "Velocity-Dependent Dynamic Curvature Gain for Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2012/07/06200791/13rRUyuNswW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798255",
"title": "HapticSphere: Physical Support To Enable Precision Touch Interaction in Mobile Mixed-Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798255/1cJ0Uje3t8Q",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798319",
"title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798319/1cJ1aPwr8l2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797763",
"title": "Physiological Effectivity and User Experience of Immersive Gait Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797763/1cJ1eRI8HqE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090595",
"title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a627",
"title": "The Cognitive Load and Usability of Three Walking Metaphors for Consumer Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a627/1pysyecdlzq",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040538",
"articleId": "13rRUwgQpDs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040555",
"articleId": "13rRUygBwhF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRQg",
"name": "ttg2012040546s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2012040546s1.zip",
"extension": "zip",
"size": "28.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygBwhF",
"doi": "10.1109/TVCG.2012.47",
"abstract": "Walking is only possible within immersive virtual environments that fit inside the boundaries of the user's physical workspace. To reduce the severity of the restrictions imposed by limited physical area, we introduce \"impossible spaces,\" a new design mechanic for virtual environments that wish to maximize the size of the virtual environment that can be explored with natural locomotion. Such environments make use of self-overlapping architectural layouts, effectively compressing comparatively large interior environments into smaller physical areas. We conducted two formal user studies to explore the perception and experience of impossible spaces. In the first experiment, we showed that reasonably small virtual rooms may overlap by as much as 56% before users begin to detect that they are in an impossible space, and that the larger virtual rooms that expanded to maximally fill our available 9.14m × 9.14m workspace may overlap by up to 31%. Our results also demonstrate that users perceive distances to objects in adjacent overlapping rooms as if the overall space was uncompressed, even at overlap levels that were overtly noticeable. In our second experiment, we combined several well-known redirection techniques to string together a chain of impossible spaces in an expansive outdoor scene. We then conducted an exploratory analysis of users' verbal feedback during exploration, which indicated that impossible spaces provide an even more powerful illusion when users are naive to the manipulation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Walking is only possible within immersive virtual environments that fit inside the boundaries of the user's physical workspace. To reduce the severity of the restrictions imposed by limited physical area, we introduce \"impossible spaces,\" a new design mechanic for virtual environments that wish to maximize the size of the virtual environment that can be explored with natural locomotion. Such environments make use of self-overlapping architectural layouts, effectively compressing comparatively large interior environments into smaller physical areas. We conducted two formal user studies to explore the perception and experience of impossible spaces. In the first experiment, we showed that reasonably small virtual rooms may overlap by as much as 56% before users begin to detect that they are in an impossible space, and that the larger virtual rooms that expanded to maximally fill our available 9.14m × 9.14m workspace may overlap by up to 31%. Our results also demonstrate that users perceive distances to objects in adjacent overlapping rooms as if the overall space was uncompressed, even at overlap levels that were overtly noticeable. In our second experiment, we combined several well-known redirection techniques to string together a chain of impossible spaces in an expansive outdoor scene. We then conducted an exploratory analysis of users' verbal feedback during exploration, which indicated that impossible spaces provide an even more powerful illusion when users are naive to the manipulation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Walking is only possible within immersive virtual environments that fit inside the boundaries of the user's physical workspace. To reduce the severity of the restrictions imposed by limited physical area, we introduce \"impossible spaces,\" a new design mechanic for virtual environments that wish to maximize the size of the virtual environment that can be explored with natural locomotion. Such environments make use of self-overlapping architectural layouts, effectively compressing comparatively large interior environments into smaller physical areas. We conducted two formal user studies to explore the perception and experience of impossible spaces. In the first experiment, we showed that reasonably small virtual rooms may overlap by as much as 56% before users begin to detect that they are in an impossible space, and that the larger virtual rooms that expanded to maximally fill our available 9.14m × 9.14m workspace may overlap by up to 31%. Our results also demonstrate that users perceive distances to objects in adjacent overlapping rooms as if the overall space was uncompressed, even at overlap levels that were overtly noticeable. In our second experiment, we combined several well-known redirection techniques to string together a chain of impossible spaces in an expansive outdoor scene. We then conducted an exploratory analysis of users' verbal feedback during exploration, which indicated that impossible spaces provide an even more powerful illusion when users are naive to the manipulation.",
"title": "Impossible Spaces: Maximizing Natural Walking in Virtual Environments with Self-Overlapping Architecture",
"normalizedTitle": "Impossible Spaces: Maximizing Natural Walking in Virtual Environments with Self-Overlapping Architecture",
"fno": "ttg2012040555",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Feedback",
"Gait Analysis",
"Human Computer Interaction",
"Illusion",
"Natural Walking",
"Self Overlapping Architectural Layout",
"Immersive Virtual Environment",
"User Physical Workspace",
"Impossible Spaces",
"Design Mechanic",
"Natural Locomotion",
"Virtual Room",
"Adjacent Overlapping Room",
"Redirection Technique",
"Expansive Outdoor Scene",
"Users Verbal Feedback",
"Virtual Environments",
"Legged Locomotion",
"Buildings",
"Educational Institutions",
"Estimation",
"Layout",
"Space Exploration",
"Redirection",
"Virtual Environments",
"Perception",
"Spatial Illusions"
],
"authors": [
{
"givenName": "E. A.",
"surname": "Suma",
"fullName": "E. A. Suma",
"affiliation": "Inst. for Creative Technol., Univ. of Southern California, Playa Vista, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Z.",
"surname": "Lipps",
"fullName": "Z. Lipps",
"affiliation": "Inst. for Creative Technol., Univ. of Southern California, Playa Vista, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "S.",
"surname": "Finkelstein",
"fullName": "S. Finkelstein",
"affiliation": "Carnegie Mellon Univ., Pittsburgh, PA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "D. M.",
"surname": "Krum",
"fullName": "D. M. Krum",
"affiliation": "Inst. for Creative Technol., Univ. of Southern California, Playa Vista, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Bolas",
"fullName": "M. Bolas",
"affiliation": "Sch. of Cinematic Arts, Univ. of Southern California, Los Angeles, CA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "555-564",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892335",
"title": "Designing intentional impossible spaces in virtual reality narratives: A case study",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892335/12OmNApcu9b",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549386",
"title": "Flexible spaces: A virtual step outside of reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549386/12OmNBOllfZ",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550194",
"title": "Flexible spaces: Dynamic layout generation for infinite walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550194/12OmNyFU75b",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446167",
"title": "Redirected Spaces: Going Beyond Borders",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446167/13bd1fph1xv",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a167",
"title": "Foldable Spaces: An Overt Redirection Approach for Natural Walking in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a167/1CJc5J6RYYM",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a622",
"title": "Minimaps for Impossible Spaces: Improving Spatial Cognition in Self-Overlapping Virtual Rooms",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a622/1CJe2Dfxj8Y",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798074",
"title": "VRoamer: Generating On-The-Fly VR Experiences While Walking inside Large, Unknown Real-World Building Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798074/1cJ0OPBhW4U",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090660",
"title": "Relative Room Size Judgments in Impossible Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090660/1jIxqsUN6ik",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a389",
"title": "Impossible Open Spaces: Exploring the Effects of Occlusion on the Noticeability of Self-Overlapping Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a389/1tnWx88Rxuw",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a050",
"title": "Impossible Staircase: Vertically Real Walking in an Infinite Virtual Tower",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a050/1tuAqY26wzm",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040546",
"articleId": "13rRUxOve9G",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040565",
"articleId": "13rRUEgs2LZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUEgs2LZ",
"doi": "10.1109/TVCG.2012.44",
"abstract": "In this paper, we explore techniques that aim to improve site understanding for outdoor Augmented Reality (AR) applications. While the first person perspective in AR is a direct way of filtering and zooming on a portion of the data set, it severely narrows overview of the situation, particularly over large areas. We present two interactive techniques to overcome this problem: multi-view AR and variable perspective view. We describe in details the conceptual, visualization and interaction aspects of these techniques and their evaluation through a comparative user study. The results we have obtained strengthen the validity of our approach and the applicability of our methods to a large range of application domains.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we explore techniques that aim to improve site understanding for outdoor Augmented Reality (AR) applications. While the first person perspective in AR is a direct way of filtering and zooming on a portion of the data set, it severely narrows overview of the situation, particularly over large areas. We present two interactive techniques to overcome this problem: multi-view AR and variable perspective view. We describe in details the conceptual, visualization and interaction aspects of these techniques and their evaluation through a comparative user study. The results we have obtained strengthen the validity of our approach and the applicability of our methods to a large range of application domains.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we explore techniques that aim to improve site understanding for outdoor Augmented Reality (AR) applications. While the first person perspective in AR is a direct way of filtering and zooming on a portion of the data set, it severely narrows overview of the situation, particularly over large areas. We present two interactive techniques to overcome this problem: multi-view AR and variable perspective view. We describe in details the conceptual, visualization and interaction aspects of these techniques and their evaluation through a comparative user study. The results we have obtained strengthen the validity of our approach and the applicability of our methods to a large range of application domains.",
"title": "Extended Overview Techniques for Outdoor Augmented Reality",
"normalizedTitle": "Extended Overview Techniques for Outdoor Augmented Reality",
"fno": "ttg2012040565",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Extended Overview Techniques",
"Outdoor Augmented Reality Application",
"Site Understanding",
"Data Set Portion Filtering",
"Data Set Portion Zooming",
"Multiview AR Interactive Techniques",
"Variable Perspective View Interactive Techniques",
"Comparative User Study",
"Cameras",
"Data Visualization",
"Three Dimensional Displays",
"Mobile Communication",
"Navigation",
"Solid Modeling",
"Context",
"Information Interfaces And Presentation",
"Mobile Augmented Reality",
"Multi Perspective Views",
"Situation Awareness",
"Navigation"
],
"authors": [
{
"givenName": "Eduardo",
"surname": "Veas",
"fullName": "Eduardo Veas",
"affiliation": "Graz University of Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Raphael",
"surname": "Grasset",
"fullName": "Raphael Grasset",
"affiliation": "Graz University of Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ernst",
"surname": "Kruijff",
"fullName": "Ernst Kruijff",
"affiliation": "Center of Usability Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "Graz University of Technology",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "565-572",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2010/9343/0/05643602",
"title": "Camera pose navigation using Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643602/12OmNvA1hoG",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836489",
"title": "Riverwalk: Incorporating Historical Photographs in Public Outdoor Augmented Reality Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836489/12OmNvqmUM8",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671790",
"title": "Content first - A concept for industrial augmented reality maintenance applications using mobile devices",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671790/12OmNxGSm5G",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2013/11/0/06728899",
"title": "Reality and perception: Utilization of many facets of augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2013/06728899/12OmNxiKrZY",
"parentPublication": {
"id": "proceedings/icat/2013/11/0",
"title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802045",
"title": "Transitional Augmented Reality navigation for live captured scenes",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802045/12OmNyRPgKH",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504692",
"title": "Evaluating wide-field-of-view augmented reality with mixed reality simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504692/12OmNzRZpYz",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2005/2419/0/24190092",
"title": "Pictorial Depth Cues for Outdoor Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2005/24190092/12OmNzVoBzm",
"parentPublication": {
"id": "proceedings/iswc/2005/2419/0",
"title": "Ninth IEEE International Symposium on Wearable Computers (ISWC'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a493",
"title": "Cross-View Visual Geo-Localization for Outdoor Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a493/1MNgLMRvOtq",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797821",
"title": "Ground Camera Images and UAV 3D Model Registration for Outdoor Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797821/1cJ0QWVjZ5u",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a104",
"title": "User-Aided Global Registration Method using Geospatial 3D Data for Large-Scale Mobile Outdoor Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a104/1pBMkF3iMYU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040555",
"articleId": "13rRUygBwhF",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040573",
"articleId": "13rRUyY28Yr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyY28Yr",
"doi": "10.1109/TVCG.2012.53",
"abstract": "In augmented reality, one of key tasks to achieve a convincing visual appearance consistency between virtual objects and video scenes is to have a coherent illumination along the whole sequence. As outdoor illumination is largely dependent on the weather, the lighting condition may change from frame to frame. In this paper, we propose a full image-based approach for online tracking of outdoor illumination variations from videos captured with moving cameras. Our key idea is to estimate the relative intensities of sunlight and skylight via a sparse set of planar feature-points extracted from each frame. To address the inevitable feature misalignments, a set of constraints are introduced to select the most reliable ones. Exploiting the spatial and temporal coherence of illumination, the relative intensities of sunlight and skylight are finally estimated by using an optimization process. We validate our technique on a set of real-life videos and show that the results with our estimations are visually coherent along the video sequences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In augmented reality, one of key tasks to achieve a convincing visual appearance consistency between virtual objects and video scenes is to have a coherent illumination along the whole sequence. As outdoor illumination is largely dependent on the weather, the lighting condition may change from frame to frame. In this paper, we propose a full image-based approach for online tracking of outdoor illumination variations from videos captured with moving cameras. Our key idea is to estimate the relative intensities of sunlight and skylight via a sparse set of planar feature-points extracted from each frame. To address the inevitable feature misalignments, a set of constraints are introduced to select the most reliable ones. Exploiting the spatial and temporal coherence of illumination, the relative intensities of sunlight and skylight are finally estimated by using an optimization process. We validate our technique on a set of real-life videos and show that the results with our estimations are visually coherent along the video sequences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In augmented reality, one of key tasks to achieve a convincing visual appearance consistency between virtual objects and video scenes is to have a coherent illumination along the whole sequence. As outdoor illumination is largely dependent on the weather, the lighting condition may change from frame to frame. In this paper, we propose a full image-based approach for online tracking of outdoor illumination variations from videos captured with moving cameras. Our key idea is to estimate the relative intensities of sunlight and skylight via a sparse set of planar feature-points extracted from each frame. To address the inevitable feature misalignments, a set of constraints are introduced to select the most reliable ones. Exploiting the spatial and temporal coherence of illumination, the relative intensities of sunlight and skylight are finally estimated by using an optimization process. We validate our technique on a set of real-life videos and show that the results with our estimations are visually coherent along the video sequences.",
"title": "Online Tracking of Outdoor Lighting Variations for Augmented Reality with Moving Cameras",
"normalizedTitle": "Online Tracking of Outdoor Lighting Variations for Augmented Reality with Moving Cameras",
"fno": "ttg2012040573",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Video Signal Processing",
"Augmented Reality",
"Cameras",
"Feature Extraction",
"Image Sequences",
"Lighting",
"Object Tracking",
"Optimisation",
"Optimization Process",
"Online Tracking",
"Outdoor Lighting Variation",
"Augmented Reality",
"Moving Camera",
"Visual Appearance Consistency",
"Virtual Object",
"Video Scene",
"Illumination",
"Video Sequence",
"Lighting Condition",
"Full Image Based Approach",
"Sunlight Relative Intensity",
"Skylight Relative Intensity",
"Planar Feature Point Extraction",
"Spatial Coherence",
"Temporal Coherence",
"Lighting",
"Estimation",
"Cameras",
"Three Dimensional Displays",
"Feature Extraction",
"Buildings",
"Geometry",
"Moving Cameras",
"Augmented Reality",
"Illumination Coherence"
],
"authors": [
{
"givenName": null,
"surname": "Yanli Liu",
"fullName": "Yanli Liu",
"affiliation": "Coll. of Comput. Sci., Sichuan Univ., Chengdu, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "X.",
"surname": "Granier",
"fullName": "X. Granier",
"affiliation": "LaBRI, INRIA Bordeaux Sud-Ouest, Bordeaux, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "573-580",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2014/7000/1/7000a131",
"title": "Lighting Estimation in Outdoor Image Collections",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a131/12OmNBdJ5j1",
"parentPublication": {
"id": "3dv/2014/7000/1",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a439",
"title": "On-line Illumination Estimation of Outdoor Scenes Based on Area Selection for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a439/12OmNqyUUzK",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/034P1A34",
"title": "Photometric stereo for outdoor webcams",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/034P1A34/12OmNrY3LEd",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pbmcv/1995/7021/0/00514684",
"title": "Reflectance analysis under solar illumination",
"doi": null,
"abstractUrl": "/proceedings-article/pbmcv/1995/00514684/12OmNxbW4O4",
"parentPublication": {
"id": "proceedings/pbmcv/1995/7021/0",
"title": "Proceedings of the Workshop on Physics-Based Modeling in Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2013/2246/0/2246a132",
"title": "Estimation of Environmental Lighting from Known Geometries for Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2013/2246a132/12OmNy5R3C7",
"parentPublication": {
"id": "proceedings/cw/2013/2246/0",
"title": "2013 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a907",
"title": "Verification of Sky Models for Image Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a907/12OmNyo1nYo",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07299082",
"title": "One-day outdoor photometric stereo via skylight estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07299082/12OmNyuya6K",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1998/02/mcg1998020022",
"title": "Composite Lighting Simulations with Lighting Networks",
"doi": null,
"abstractUrl": "/magazine/cg/1998/02/mcg1998020022/13rRUx0xPvt",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a321",
"title": "Asynchronous Stroboscopic Structured Lighting Image Processing Using Low-Cost Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a321/17D45XvMceO",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0150",
"title": "All-Weather Deep Outdoor Lighting Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0150/1gyrg6Ricuc",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040565",
"articleId": "13rRUEgs2LZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040581",
"articleId": "13rRUxASuGh",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASuGh",
"doi": "10.1109/TVCG.2012.58",
"abstract": "Stereoscopic depth cues improve depth perception and increase immersion within virtual environments (VEs). However, improper display of these cues can distort perceived distances and directions. Consider a multi-user VE, where all users view identical stereoscopic images regardless of physical location. In this scenario, cues are typically customized for one \"leader\" equipped with a head-tracking device. This user stands at the center of projection (CoP) and all other users (\"followers\") view the scene from other locations and receive improper depth cues. This paper examines perceived depth distortion when viewing stereoscopic VEs from follower perspectives and the impact of these distortions on collaborative spatial judgments. Pairs of participants made collaborative depth judgments of virtual shapes viewed from the CoP or after displacement forward or backward. Forward and backward displacement caused perceived depth compression and expansion, respectively, with greater compression than expansion. Furthermore, distortion was less than predicted by a ray-intersection model of stereo geometry. Collaboration times were significantly longer when participants stood at different locations compared to the same location, and increased with greater perceived depth discrepancy between the two viewing locations. These findings advance our understanding of spatial distortions in multi-user VEs, and suggest a strategy for reducing distortion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Stereoscopic depth cues improve depth perception and increase immersion within virtual environments (VEs). However, improper display of these cues can distort perceived distances and directions. Consider a multi-user VE, where all users view identical stereoscopic images regardless of physical location. In this scenario, cues are typically customized for one \"leader\" equipped with a head-tracking device. This user stands at the center of projection (CoP) and all other users (\"followers\") view the scene from other locations and receive improper depth cues. This paper examines perceived depth distortion when viewing stereoscopic VEs from follower perspectives and the impact of these distortions on collaborative spatial judgments. Pairs of participants made collaborative depth judgments of virtual shapes viewed from the CoP or after displacement forward or backward. Forward and backward displacement caused perceived depth compression and expansion, respectively, with greater compression than expansion. Furthermore, distortion was less than predicted by a ray-intersection model of stereo geometry. Collaboration times were significantly longer when participants stood at different locations compared to the same location, and increased with greater perceived depth discrepancy between the two viewing locations. These findings advance our understanding of spatial distortions in multi-user VEs, and suggest a strategy for reducing distortion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Stereoscopic depth cues improve depth perception and increase immersion within virtual environments (VEs). However, improper display of these cues can distort perceived distances and directions. Consider a multi-user VE, where all users view identical stereoscopic images regardless of physical location. In this scenario, cues are typically customized for one \"leader\" equipped with a head-tracking device. This user stands at the center of projection (CoP) and all other users (\"followers\") view the scene from other locations and receive improper depth cues. This paper examines perceived depth distortion when viewing stereoscopic VEs from follower perspectives and the impact of these distortions on collaborative spatial judgments. Pairs of participants made collaborative depth judgments of virtual shapes viewed from the CoP or after displacement forward or backward. Forward and backward displacement caused perceived depth compression and expansion, respectively, with greater compression than expansion. Furthermore, distortion was less than predicted by a ray-intersection model of stereo geometry. Collaboration times were significantly longer when participants stood at different locations compared to the same location, and increased with greater perceived depth discrepancy between the two viewing locations. These findings advance our understanding of spatial distortions in multi-user VEs, and suggest a strategy for reducing distortion.",
"title": "The Right View from the Wrong Location: Depth Perception in Stereoscopic Multi-User Virtual Environments",
"normalizedTitle": "The Right View from the Wrong Location: Depth Perception in Stereoscopic Multi-User Virtual Environments",
"fno": "ttg2012040581",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Computer Displays",
"Stereo Image Processing",
"User Interfaces",
"Stereoscopic Displays",
"Depth Perception",
"Stereoscopic Multiuser Virtual Environment",
"Stereoscopic Depth Cue",
"Immersion",
"Stereoscopic Image",
"Head Tracking Device",
"Center Of Projection",
"Follower Perspective",
"Leader Perspective",
"Collaborative Spatial Judgment",
"Virtual Shape",
"Forward Displacement",
"Backward Displacement",
"Perceived Depth Compression",
"Perceived Depth Expansion",
"Ray Intersection Model",
"Stereo Geometry",
"Distortion Reduction Strategy",
"Virtual Environments",
"Predictive Models",
"Stereo Image Processing",
"Shape",
"Collaboration",
"Educational Institutions",
"And Collaborative Interaction",
"Perception",
"Stereoscopy"
],
"authors": [
{
"givenName": "B.",
"surname": "Pollock",
"fullName": "B. Pollock",
"affiliation": "Comput. Eng. Dept., Iowa State Univ., Ames, IA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Burton",
"fullName": "M. Burton",
"affiliation": "Human Comput. Interaction Program & Virtual Reality Applic. Center, Iowa State Univ., Ames, IA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J. W.",
"surname": "Kelly",
"fullName": "J. W. Kelly",
"affiliation": "Dept. of Psychol., Iowa State Univ., Ames, IA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "S.",
"surname": "Gilbert",
"fullName": "S. Gilbert",
"affiliation": "Human Comput. Interaction Program & Virtual Reality Applic. Center, Iowa State Univ., Ames, IA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "E.",
"surname": "Winer",
"fullName": "E. Winer",
"affiliation": "Mech. Eng. Dept., Iowa State Univ., Ames, IA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "581-588",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2012/1226/0/007P1A07",
"title": "Scene warping: Layer-based stereoscopic image resizing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/007P1A07/12OmNAiFI8D",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lcn/2012/1565/0/06423586",
"title": "3DSVAT: A 3D Stereoscopic Vulnerability Assessment Tool for network security",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2012/06423586/12OmNqzcvC0",
"parentPublication": {
"id": "proceedings/lcn/2012/1565/0",
"title": "37th Annual IEEE Conference on Local Computer Networks",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrais/1993/1363/0/00380802",
"title": "The effect of interocular distance upon operator performance using stereoscopic displays to perform virtual depth tasks",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1993/00380802/12OmNrJAdLH",
"parentPublication": {
"id": "proceedings/vrais/1993/1363/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dicta/2010/4271/0/4271a227",
"title": "A Two-Stage Correlation Method for Stereoscopic Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2010/4271a227/12OmNwE9Ot5",
"parentPublication": {
"id": "proceedings/dicta/2010/4271/0",
"title": "2010 International Conference on Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icycs/2008/3398/0/3398a831",
"title": "Precise Depth Perception in Projective Stereoscopic Display",
"doi": null,
"abstractUrl": "/proceedings-article/icycs/2008/3398a831/12OmNwtEEM5",
"parentPublication": {
"id": "proceedings/icycs/2008/3398/0",
"title": "2008 9th International Conference for Young Computer Scientists",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/1999/0185/0/01850080",
"title": "A Technique for Precise Depth Representation in Stereoscopic Display",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/1999/01850080/12OmNx3q6XQ",
"parentPublication": {
"id": "proceedings/cgi/1999/0185/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smc-it/2009/3637/0/3637a143",
"title": "Motor Activity-Perception Based Approach for Improving Teleoperation Systems",
"doi": null,
"abstractUrl": "/proceedings-article/smc-it/2009/3637a143/12OmNxFaLz6",
"parentPublication": {
"id": "proceedings/smc-it/2009/3637/0",
"title": "2009 Third IEEE International Conference on Space Mission Challenges for Information Technology (SMC-IT 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/is3c/2016/3071/0/3071a283",
"title": "A Study of Effects of Perceptual Cues on Presence for the Elderly in 3D Virtual Store",
"doi": null,
"abstractUrl": "/proceedings-article/is3c/2016/3071a283/12OmNzUPpta",
"parentPublication": {
"id": "proceedings/is3c/2016/3071/0",
"title": "2016 International Symposium on Computer, Consumer and Control (IS3C)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/08/ttg2013081375",
"title": "StereoPasting: Interactive Composition in Stereoscopic Images",
"doi": null,
"abstractUrl": "/journal/tg/2013/08/ttg2013081375/13rRUxC0SWa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2019/3918/0/391800a758",
"title": "The Monocular Stereoscopic Model Based on an Ordered Wave Particle Swarm",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2019/391800a758/1gRxnxuyvDy",
"parentPublication": {
"id": "proceedings/itme/2019/3918/0",
"title": "2019 10th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040573",
"articleId": "13rRUyY28Yr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040589",
"articleId": "13rRUwbs2b1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwbs2b1",
"doi": "10.1109/TVCG.2012.45",
"abstract": "Head-mounted displays (HMDs) allow users to observe virtual environments (VEs) from an egocentric perspective. However, several experiments have provided evidence that egocentric distances are perceived as compressed in VEs relative to the real world. Recent experiments suggest that the virtual view frustum set for rendering the VE has an essential impact on the user's estimation of distances. In this article we analyze if distance estimation can be improved by calibrating the view frustum for a given HMD and user. Unfortunately, in an immersive virtual reality (VR) environment, a full per user calibration is not trivial and manual per user adjustment often leads to mini- or magnification of the scene. Therefore, we propose a novel per user calibration approach with optical see-through displays commonly used in augmented reality (AR). This calibration takes advantage of a geometric scheme based on 2D point - 3D line correspondences, which can be used intuitively by inexperienced users and requires less than a minute to complete. The required user interaction is based on taking aim at a distant target marker with a close marker, which ensures non-planar measurements covering a large area of the interaction space while also reducing the number of required measurements to five. We found the tendency that a calibrated view frustum reduced the average distance underestimation of users in an immersive VR environment, but even the correctly calibrated view frustum could not entirely compensate for the distance underestimation effects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Head-mounted displays (HMDs) allow users to observe virtual environments (VEs) from an egocentric perspective. However, several experiments have provided evidence that egocentric distances are perceived as compressed in VEs relative to the real world. Recent experiments suggest that the virtual view frustum set for rendering the VE has an essential impact on the user's estimation of distances. In this article we analyze if distance estimation can be improved by calibrating the view frustum for a given HMD and user. Unfortunately, in an immersive virtual reality (VR) environment, a full per user calibration is not trivial and manual per user adjustment often leads to mini- or magnification of the scene. Therefore, we propose a novel per user calibration approach with optical see-through displays commonly used in augmented reality (AR). This calibration takes advantage of a geometric scheme based on 2D point - 3D line correspondences, which can be used intuitively by inexperienced users and requires less than a minute to complete. The required user interaction is based on taking aim at a distant target marker with a close marker, which ensures non-planar measurements covering a large area of the interaction space while also reducing the number of required measurements to five. We found the tendency that a calibrated view frustum reduced the average distance underestimation of users in an immersive VR environment, but even the correctly calibrated view frustum could not entirely compensate for the distance underestimation effects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Head-mounted displays (HMDs) allow users to observe virtual environments (VEs) from an egocentric perspective. However, several experiments have provided evidence that egocentric distances are perceived as compressed in VEs relative to the real world. Recent experiments suggest that the virtual view frustum set for rendering the VE has an essential impact on the user's estimation of distances. In this article we analyze if distance estimation can be improved by calibrating the view frustum for a given HMD and user. Unfortunately, in an immersive virtual reality (VR) environment, a full per user calibration is not trivial and manual per user adjustment often leads to mini- or magnification of the scene. Therefore, we propose a novel per user calibration approach with optical see-through displays commonly used in augmented reality (AR). This calibration takes advantage of a geometric scheme based on 2D point - 3D line correspondences, which can be used intuitively by inexperienced users and requires less than a minute to complete. The required user interaction is based on taking aim at a distant target marker with a close marker, which ensures non-planar measurements covering a large area of the interaction space while also reducing the number of required measurements to five. We found the tendency that a calibrated view frustum reduced the average distance underestimation of users in an immersive VR environment, but even the correctly calibrated view frustum could not entirely compensate for the distance underestimation effects.",
"title": "Geometric Calibration of Head-Mounted Displays and its Effects on Distance Estimation",
"normalizedTitle": "Geometric Calibration of Head-Mounted Displays and its Effects on Distance Estimation",
"fno": "ttg2012040589",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Augmented Reality",
"Calibration",
"Helmet Mounted Displays",
"Human Computer Interaction",
"Distance Estimation",
"Geometric Calibration",
"Head Mounted Display",
"HMD",
"Egocentric Perspective",
"Egocentric Distance",
"Virtual View Frustum Set",
"Rendering",
"Immersive Virtual Reality Environment",
"Full Per User Calibration",
"Manual Per User Adjustment",
"Optical See Through Display",
"Augmented Reality",
"Geometric Scheme",
"2 D Point 3 D Line Correspondences",
"User Interaction",
"Calibrated View Frustum",
"Average Distance Underestimation Reduction",
"Immersive VR Environment",
"Distance Underestimation Effects",
"Cameras",
"Calibration",
"Three Dimensional Displays",
"Estimation",
"Noise",
"Vectors",
"Target Tracking",
"Distance Perception",
"Optical See Through",
"HMD Calibration"
],
"authors": [
{
"givenName": "F.",
"surname": "Kellner",
"fullName": "F. Kellner",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "B.",
"surname": "Bolte",
"fullName": "B. Bolte",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "G.",
"surname": "Bruder",
"fullName": "G. Bruder",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "U.",
"surname": "Rautenberg",
"fullName": "U. Rautenberg",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "F.",
"surname": "Steinicke",
"fullName": "F. Steinicke",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Lappe",
"fullName": "M. Lappe",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "R.",
"surname": "Koch",
"fullName": "R. Koch",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "589-596",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504693",
"title": "A calibration method for optical see-through head-mounted displays with a depth camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504693/12OmNAnMuMd",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwar/1999/0359/0/03590085",
"title": "Marker Tracking and HMD Calibration for a Video-Based Augmented Reality Conferencing System",
"doi": null,
"abstractUrl": "/proceedings-article/iwar/1999/03590085/12OmNBcAGLe",
"parentPublication": {
"id": "proceedings/iwar/1999/0359/0",
"title": "Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2003/2006/0/20060161",
"title": "Evaluation of Calibration Procedures for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2003/20060161/12OmNCeK2b7",
"parentPublication": {
"id": "proceedings/ismar/2003/2006/0",
"title": "The Second IEEE and ACM International Symposium on Mixed and Augmented Reality, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223450",
"title": "Evaluating optical see-through head-mounted display calibration via frustum visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223450/12OmNrAv3Ap",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrais/1993/1363/0/00380772",
"title": "Calibration of head-mounted displays for augmented reality applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1993/00380772/12OmNwwuDRm",
"parentPublication": {
"id": "proceedings/vrais/1993/1363/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwar/1999/0359/0/03590075",
"title": "A Method for Calibrating See-Through Head-Mounted Displays for AR",
"doi": null,
"abstractUrl": "/proceedings-article/iwar/1999/03590075/12OmNxTVU20",
"parentPublication": {
"id": "proceedings/iwar/1999/0359/0",
"title": "Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802063",
"title": "Automated calibration of display characteristics (ACDC) for head-mounted displays and arbitrary surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802063/12OmNxwENpf",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07012105",
"title": "Corneal-Imaging Calibration for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07012105/13rRUxE04tC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a259",
"title": "OSTNet: Calibration Method for Optical See-Through Head-Mounted Displays via Non-Parametric Distortion Map Generation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a259/1gysj1o4L16",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040581",
"articleId": "13rRUxASuGh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040597",
"articleId": "13rRUxASuve",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASuve",
"doi": "10.1109/TVCG.2012.42",
"abstract": "Volume visualization has been widely used for decades for analyzing datasets ranging from 3D medical images to seismic data to paleontological data. Many have proposed using immersive virtual reality (VR) systems to view volume visualizations, and there is anecdotal evidence of the benefits of VR for this purpose. However, there has been very little empirical research exploring the effects of higher levels of immersion for volume visualization, and it is not known how various components of immersion influence the effectiveness of visualization in VR. We conducted a controlled experiment in which we studied the independent and combined effects of three components of immersion (head tracking, field of regard, and stereoscopic rendering) on the effectiveness of visualization tasks with two x-ray microscopic computed tomography datasets. We report significant benefits of analyzing volume data in an environment involving those components of immersion. We find that the benefits do not necessarily require all three components simultaneously, and that the components have variable influence on different task categories. The results of our study improve our understanding of the effects of immersion on perceived and actual task performance, and provide guidance on the choice of display systems to designers seeking to maximize the effectiveness of volume visualization applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Volume visualization has been widely used for decades for analyzing datasets ranging from 3D medical images to seismic data to paleontological data. Many have proposed using immersive virtual reality (VR) systems to view volume visualizations, and there is anecdotal evidence of the benefits of VR for this purpose. However, there has been very little empirical research exploring the effects of higher levels of immersion for volume visualization, and it is not known how various components of immersion influence the effectiveness of visualization in VR. We conducted a controlled experiment in which we studied the independent and combined effects of three components of immersion (head tracking, field of regard, and stereoscopic rendering) on the effectiveness of visualization tasks with two x-ray microscopic computed tomography datasets. We report significant benefits of analyzing volume data in an environment involving those components of immersion. We find that the benefits do not necessarily require all three components simultaneously, and that the components have variable influence on different task categories. The results of our study improve our understanding of the effects of immersion on perceived and actual task performance, and provide guidance on the choice of display systems to designers seeking to maximize the effectiveness of volume visualization applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Volume visualization has been widely used for decades for analyzing datasets ranging from 3D medical images to seismic data to paleontological data. Many have proposed using immersive virtual reality (VR) systems to view volume visualizations, and there is anecdotal evidence of the benefits of VR for this purpose. However, there has been very little empirical research exploring the effects of higher levels of immersion for volume visualization, and it is not known how various components of immersion influence the effectiveness of visualization in VR. We conducted a controlled experiment in which we studied the independent and combined effects of three components of immersion (head tracking, field of regard, and stereoscopic rendering) on the effectiveness of visualization tasks with two x-ray microscopic computed tomography datasets. We report significant benefits of analyzing volume data in an environment involving those components of immersion. We find that the benefits do not necessarily require all three components simultaneously, and that the components have variable influence on different task categories. The results of our study improve our understanding of the effects of immersion on perceived and actual task performance, and provide guidance on the choice of display systems to designers seeking to maximize the effectiveness of volume visualization applications.",
"title": "Effects of Immersion on Visual Analysis of Volume Data",
"normalizedTitle": "Effects of Immersion on Visual Analysis of Volume Data",
"fno": "ttg2012040597",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Computerised Tomography",
"Data Analysis",
"Data Visualisation",
"Rendering Computer Graphics",
"Immersive Virtual Reality Systems",
"Immersion Effect",
"Visual Analysis",
"Volume Data Analysis",
"Volume Visualization",
"3 D Medical Image",
"Seismic Data",
"Paleontological Data",
"Immersive VR System",
"Head Tracking Component",
"Field Of Regard Component",
"Stereoscopic Rendering Component",
"X Ray Microscopic Computed Tomography Dataset",
"Perceived Task Performance",
"Display System",
"Three Dimensional Displays",
"Data Visualization",
"Mice",
"Visualization",
"Rendering Computer Graphics",
"Training",
"Head",
"Virtual Reality",
"Immersion",
"Micro CT",
"Data Analysis",
"Volume Visualization",
"3 D Visualization",
"CAVE",
"Virtual Environments"
],
"authors": [
{
"givenName": "B.",
"surname": "Laha",
"fullName": "B. Laha",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "K.",
"surname": "Sensharma",
"fullName": "K. Sensharma",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "J. D.",
"surname": "Schiffbauer",
"fullName": "J. D. Schiffbauer",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "D. A.",
"surname": "Bowman",
"fullName": "D. A. Bowman",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "597-606",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgrapi-t/2013/4176/0/4176a006",
"title": "Visual Immersion Issues in Virtual Reality: A Survey",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi-t/2013/4176a006/12OmNAmVH6E",
"parentPublication": {
"id": "proceedings/sibgrapi-t/2013/4176/0",
"title": "2013 26th Conference on Graphics, Patterns and Images Tutorials (SIBGRAPI-T)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2009/3767/0/3767a608",
"title": "Tele-Immersion Environment for Video Avatar Based CVE",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2009/3767a608/12OmNB06l11",
"parentPublication": {
"id": "proceedings/nbis/2009/3767/0",
"title": "2009 International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759470",
"title": "VEGI: Virtual Environment GUI Immersion system",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759470/12OmNBKEyn1",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ainaw/2008/3096/0/3096b023",
"title": "Proposal of Multimodal Communications System Using Tele-Immersion Technology",
"doi": null,
"abstractUrl": "/proceedings-article/ainaw/2008/3096b023/12OmNvHGrAg",
"parentPublication": {
"id": "proceedings/ainaw/2008/3096/0",
"title": "2008 22nd International Conference on Advanced Information Networking and Applications (AINA 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040529",
"title": "Validation of the MR Simulation Approach for Evaluating the Effects of Immersion on Visual Analysis of Volume Data",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040529/13rRUwInvyx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2007/07/r7036",
"title": "Virtual Reality: How Much Immersion Is Enough?",
"doi": null,
"abstractUrl": "/magazine/co/2007/07/r7036/13rRUwwJWIr",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404513",
"title": "Effects of VR System Fidelity on Analyzing Isosurface Visualization of Volume Datasets",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404513/13rRUxly95A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798021",
"title": "The Effects of Immersion on Harm-inducing Factors in Virtual Slot Machines",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798021/1cJ16iGCIxy",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08836087",
"title": "The Impact of Immersion on Cluster Identification Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08836087/1dia1nodZeM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a440",
"title": "Effects of Immersion and Visual Angle on Brand Placement Effectiveness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a440/1tnY3EP4yM8",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040589",
"articleId": "13rRUwbs2b1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040607",
"articleId": "13rRUxASu0H",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASu0H",
"doi": "10.1109/TVCG.2012.41",
"abstract": "Direct replay of the experience of a user in a virtual environment is difficult for others to watch due to unnatural camera motions. We present methods for replaying and summarizing these egocentric experiences that effectively communicate the user's observations while reducing unwanted camera movements. Our approach summarizes the viewpoint path as a concise sequence of viewpoints that cover the same parts of the scene. The core of our approach is a novel content-dependent metric that can be used to identify similarities between viewpoints. This enables viewpoints to be grouped by similar contextual view information and provides a means to generate novel viewpoints that can encapsulate a series of views. These resulting encapsulated viewpoints are used to synthesize new camera paths that convey the content of the original viewer's experience. Projecting the initial movement of the user back on the scene can be used to convey the details of their observations, and the extracted viewpoints can serve as bookmarks for control or analysis. Finally we present performance analysis along with two forms of validation to test whether the extracted viewpoints are representative of the viewer's original observations and to test for the overall effectiveness of the presented replay methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Direct replay of the experience of a user in a virtual environment is difficult for others to watch due to unnatural camera motions. We present methods for replaying and summarizing these egocentric experiences that effectively communicate the user's observations while reducing unwanted camera movements. Our approach summarizes the viewpoint path as a concise sequence of viewpoints that cover the same parts of the scene. The core of our approach is a novel content-dependent metric that can be used to identify similarities between viewpoints. This enables viewpoints to be grouped by similar contextual view information and provides a means to generate novel viewpoints that can encapsulate a series of views. These resulting encapsulated viewpoints are used to synthesize new camera paths that convey the content of the original viewer's experience. Projecting the initial movement of the user back on the scene can be used to convey the details of their observations, and the extracted viewpoints can serve as bookmarks for control or analysis. Finally we present performance analysis along with two forms of validation to test whether the extracted viewpoints are representative of the viewer's original observations and to test for the overall effectiveness of the presented replay methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Direct replay of the experience of a user in a virtual environment is difficult for others to watch due to unnatural camera motions. We present methods for replaying and summarizing these egocentric experiences that effectively communicate the user's observations while reducing unwanted camera movements. Our approach summarizes the viewpoint path as a concise sequence of viewpoints that cover the same parts of the scene. The core of our approach is a novel content-dependent metric that can be used to identify similarities between viewpoints. This enables viewpoints to be grouped by similar contextual view information and provides a means to generate novel viewpoints that can encapsulate a series of views. These resulting encapsulated viewpoints are used to synthesize new camera paths that convey the content of the original viewer's experience. Projecting the initial movement of the user back on the scene can be used to convey the details of their observations, and the extracted viewpoints can serve as bookmarks for control or analysis. Finally we present performance analysis along with two forms of validation to test whether the extracted viewpoints are representative of the viewer's original observations and to test for the overall effectiveness of the presented replay methods.",
"title": "Effective Replays and Summarization of Virtual Experiences",
"normalizedTitle": "Effective Replays and Summarization of Virtual Experiences",
"fno": "ttg2012040607",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Cameras",
"Performance Analysis",
"Virtual Experience Replay",
"Virtual Experience Summarization",
"Virtual Environment",
"Unnatural Camera Motions",
"Egocentric Experiences",
"User Observations",
"Viewpoint Path Summarization",
"Content Dependent Metric",
"Contextual View Information",
"Viewpoint Extraction",
"Cameras",
"Measurement",
"Graphics Processing Unit",
"Equations",
"Three Dimensional Displays",
"Geometry",
"Virtual Environments",
"Bookmarking",
"Virtual Reality",
"Viewpoint Similarity",
"Summarization",
"GPU"
],
"authors": [
{
"givenName": "K.",
"surname": "Ponto",
"fullName": "K. Ponto",
"affiliation": "Dept. of Comput. Sci., Univ. of Wisconsin, Madison, WI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J.",
"surname": "Kohlmann",
"fullName": "J. Kohlmann",
"affiliation": "Dept. of Comput. Sci., Univ. of Wisconsin, Madison, WI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Gleicher",
"fullName": "M. Gleicher",
"affiliation": "Dept. of Comput. Sci., Univ. of Wisconsin, Madison, WI, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "607-616",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iciw/2008/3163/0/3163a054",
"title": "Experiences with Virtual Environments in the EHEA",
"doi": null,
"abstractUrl": "/proceedings-article/iciw/2008/3163a054/12OmNBPc8ub",
"parentPublication": {
"id": "proceedings/iciw/2008/3163/0",
"title": "Internet and Web Applications and Services, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2001/0981/1/09811032",
"title": "Sharing Viewpoints in Collaborative Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2001/09811032/12OmNBlFQZa",
"parentPublication": {
"id": "proceedings/hicss/2001/0981/1",
"title": "Proceedings of the 34th Annual Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2002/1492/0/14920275",
"title": "Perceived Egocentric Distances in Real, Image-Based, and Traditional Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920275/12OmNwHhoQ2",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/170P2A20",
"title": "Discovering important people and objects for egocentric video summarization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/170P2A20/12OmNyKJinJ",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ase/2008/2187/0/04639351",
"title": "VCR: Virtual Capture and Replay for Performance Testing",
"doi": null,
"abstractUrl": "/proceedings-article/ase/2008/04639351/12OmNyKa60O",
"parentPublication": {
"id": "proceedings/ase/2008/2187/0",
"title": "2008 23rd IEEE/ACM International Conference on Automated Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsps/2009/3654/0/3654a606",
"title": "X3D-Based Virtual Reality Experiences in Water Museum Exhibitions",
"doi": null,
"abstractUrl": "/proceedings-article/icsps/2009/3654a606/12OmNyXMQeH",
"parentPublication": {
"id": "proceedings/icsps/2009/3654/0",
"title": "2009 International Conference on Signal Processing Systems (ICSPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2008/3153/0/3153a286",
"title": "Multiple View Integration and Display Using Virtual Mirrors",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2008/3153a286/12OmNzRHONh",
"parentPublication": {
"id": "proceedings/crv/2008/3153/0",
"title": "2008 Canadian Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a777",
"title": "Visual Saliency Based Aerial Video Summarization by Online Scene Classification",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a777/12OmNzTH16l",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446144",
"title": "Redirected Scene Rotation for Immersive Movie Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446144/13bd1fHrlRD",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/06/08554159",
"title": "Scene Transitions and Teleportation in Virtual Reality and the Implications for Spatial Awareness and Sickness",
"doi": null,
"abstractUrl": "/journal/tg/2020/06/08554159/17D45WB0qbp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040597",
"articleId": "13rRUxASuve",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040617",
"articleId": "13rRUyfKIHI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyfKIHI",
"doi": "10.1109/TVCG.2012.46",
"abstract": "Palpation is a physical examination technique where objects, e.g., organs or body parts, are touched with fingers to determine their size, shape, consistency and location. Many medical procedures utilize palpation as a supplementary interaction technique and it can be therefore considered as an essential basic method. However, palpation is mostly neglected in medical training simulators, with the exception of very specialized simulators that solely focus on palpation, e.g., for manual cancer detection. In this article we propose a novel approach to enable haptic palpation interaction for virtual reality-based medical simulators. The main contribution is an extensive user study conducted with a large group of medical experts. To provide a plausible simulation framework for this user study, we contribute a novel and detailed interaction algorithm for palpation with tissue dragging, which utilizes a multi-object force algorithm to support multiple layers of anatomy and a pulse force algorithm for simulation of an arterial pulse. Furthermore, we propose a modification for an off-the-shelf haptic device by adding a lightweight palpation pad to support a more realistic finger grip configuration for palpation tasks. The user study itself has been conducted on a medical training simulator prototype with a specific procedure from regional anesthesia, which strongly depends on palpation. The prototype utilizes a co-rotational finite-element approach for soft tissue simulation and provides bimanual interaction by combining the aforementioned techniques with needle insertion for the other hand. The results of the user study suggest reasonable face validity of the simulator prototype and in particular validate medical plausibility of the proposed palpation interaction algorithm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Palpation is a physical examination technique where objects, e.g., organs or body parts, are touched with fingers to determine their size, shape, consistency and location. Many medical procedures utilize palpation as a supplementary interaction technique and it can be therefore considered as an essential basic method. However, palpation is mostly neglected in medical training simulators, with the exception of very specialized simulators that solely focus on palpation, e.g., for manual cancer detection. In this article we propose a novel approach to enable haptic palpation interaction for virtual reality-based medical simulators. The main contribution is an extensive user study conducted with a large group of medical experts. To provide a plausible simulation framework for this user study, we contribute a novel and detailed interaction algorithm for palpation with tissue dragging, which utilizes a multi-object force algorithm to support multiple layers of anatomy and a pulse force algorithm for simulation of an arterial pulse. Furthermore, we propose a modification for an off-the-shelf haptic device by adding a lightweight palpation pad to support a more realistic finger grip configuration for palpation tasks. The user study itself has been conducted on a medical training simulator prototype with a specific procedure from regional anesthesia, which strongly depends on palpation. The prototype utilizes a co-rotational finite-element approach for soft tissue simulation and provides bimanual interaction by combining the aforementioned techniques with needle insertion for the other hand. The results of the user study suggest reasonable face validity of the simulator prototype and in particular validate medical plausibility of the proposed palpation interaction algorithm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Palpation is a physical examination technique where objects, e.g., organs or body parts, are touched with fingers to determine their size, shape, consistency and location. Many medical procedures utilize palpation as a supplementary interaction technique and it can be therefore considered as an essential basic method. However, palpation is mostly neglected in medical training simulators, with the exception of very specialized simulators that solely focus on palpation, e.g., for manual cancer detection. In this article we propose a novel approach to enable haptic palpation interaction for virtual reality-based medical simulators. The main contribution is an extensive user study conducted with a large group of medical experts. To provide a plausible simulation framework for this user study, we contribute a novel and detailed interaction algorithm for palpation with tissue dragging, which utilizes a multi-object force algorithm to support multiple layers of anatomy and a pulse force algorithm for simulation of an arterial pulse. Furthermore, we propose a modification for an off-the-shelf haptic device by adding a lightweight palpation pad to support a more realistic finger grip configuration for palpation tasks. The user study itself has been conducted on a medical training simulator prototype with a specific procedure from regional anesthesia, which strongly depends on palpation. The prototype utilizes a co-rotational finite-element approach for soft tissue simulation and provides bimanual interaction by combining the aforementioned techniques with needle insertion for the other hand. The results of the user study suggest reasonable face validity of the simulator prototype and in particular validate medical plausibility of the proposed palpation interaction algorithm.",
"title": "Haptic Palpation for Medical Simulation in Virtual Environments",
"normalizedTitle": "Haptic Palpation for Medical Simulation in Virtual Environments",
"fno": "ttg2012040617",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Computer Based Training",
"Finite Element Analysis",
"Haptic Interfaces",
"Medical Computing",
"User Interfaces",
"Needle Insertion",
"Haptic Palpation",
"Medical Simulation",
"Virtual Environment",
"Palpation Examination Technique",
"Medical Procedure",
"Supplementary Interaction Technique",
"Medical Training Simulator",
"Manual Cancer Detection",
"Virtual Reality Based Medical Simulator",
"User Study",
"Tissue Dragging",
"Multiobject Force Algorithm",
"Pulse Force Algorithm",
"Arterial Pulse Simulation",
"Anatomy Layer Support",
"Haptic Device",
"Finger Grip Configuration",
"Regional Anesthesia",
"Corotational Finite Element Approach",
"Soft Tissue Simulation",
"Bimanual Interaction",
"Haptic Interfaces",
"Force",
"Skin",
"Rendering Computer Graphics",
"Bismuth",
"Visualization",
"Phantoms",
"User Studies",
"Medicine",
"Physically Based Simulation",
"Haptics"
],
"authors": [
{
"givenName": "S.",
"surname": "Ullrich",
"fullName": "S. Ullrich",
"affiliation": "Virtual Reality Group, RWTH Aachen Univ., Aachen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "T.",
"surname": "Kuhlen",
"fullName": "T. Kuhlen",
"affiliation": "Virtual Reality Group, RWTH Aachen Univ., Aachen, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "617-625",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/achi/2009/3529/0/3529a193",
"title": "Haptic Palpation for the Femoral Pulse in Virtual Interventional Radiology",
"doi": null,
"abstractUrl": "/proceedings-article/achi/2009/3529a193/12OmNAZOJU1",
"parentPublication": {
"id": "proceedings/achi/2009/3529/0",
"title": "International Conference on Advances in Computer-Human Interaction",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2014/4261/0/4261a268",
"title": "Breast Palpation Simulation with Haptic Feedback: Prototype and Initial Results",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2014/4261a268/12OmNBhHt7R",
"parentPublication": {
"id": "proceedings/svr/2014/4261/0",
"title": "2014 XVI Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a202",
"title": "Modeling Deformable Objects for Medical Training with Haptic Devices",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a202/12OmNCbU2U1",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643585",
"title": "Haptic simulation of breast cancer palpation: A case study of haptic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643585/12OmNwtn3ui",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2016/0987/0/0987a382",
"title": "Improvement of Mesh Free Deforming Analysis for Maxillofacial Palpation on a Virtual Training System",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2016/0987a382/12OmNxVDuQI",
"parentPublication": {
"id": "proceedings/cisis/2016/0987/0",
"title": "2016 10th International Conference on Complex, Intelligent, and Software Intensive Systems (CISIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2011/1189/0/05999154",
"title": "Ultrasound palpation by haptic elastography",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2011/05999154/12OmNy3Agyv",
"parentPublication": {
"id": "proceedings/cbms/2011/1189/0",
"title": "2011 24th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2012/4814/0/4814a207",
"title": "Virtual Palpation for Medical Training in Cyberworlds",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2012/4814a207/12OmNzFv4gM",
"parentPublication": {
"id": "proceedings/cw/2012/4814/0",
"title": "2012 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2014/4261/0/4261a258",
"title": "Systematically Reviewing Techniques and Devices Used in Palpation Simulation with Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2014/4261a258/12OmNzXnNpJ",
"parentPublication": {
"id": "proceedings/svr/2014/4261/0",
"title": "2014 XVI Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/01/tth2012010077",
"title": "Rendering Virtual Tumors in Real Tissue Mock-Ups Using Haptic Augmented Reality",
"doi": null,
"abstractUrl": "/journal/th/2012/01/tth2012010077/13rRUwInvt1",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/02/tth2012020097",
"title": "Abdominal Palpation Haptic Device for Colonoscopy Simulation Using Pneumatic Control",
"doi": null,
"abstractUrl": "/journal/th/2012/02/tth2012020097/13rRUwInvyJ",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040607",
"articleId": "13rRUxASu0H",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040626",
"articleId": "13rRUwIF6dN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRK9",
"name": "ttg2012040617s1.mpg",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2012040617s1.mpg",
"extension": "mpg",
"size": "13.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwIF6dN",
"doi": "10.1109/TVCG.2012.43",
"abstract": "In recent years, consumers have witnessed a technological revolution that has delivered more-realistic experiences in their own homes through high-definition, stereoscopic televisions and natural, gesture-based video game consoles. Although these experiences are more realistic, offering higher levels of fidelity, it is not clear how the increased display and interaction aspects of fidelity impact the user experience. Since immersive virtual reality (VR) allows us to achieve very high levels of fidelity, we designed and conducted a study that used a six-sided CAVE to evaluate display fidelity and interaction fidelity independently, at extremely high and low levels, for a VR first-person shooter (FPS) game. Our goal was to gain a better understanding of the effects of fidelity on the user in a complex, performance-intensive context. The results of our study indicate that both display and interaction fidelity significantly affect strategy and performance, as well as subjective judgments of presence, engagement, and usability. In particular, performance results were strongly in favor of two conditions: low-display, low-interaction fidelity (representative of traditional FPS games) and high-display, high-interaction fidelity (similar to the real world).",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent years, consumers have witnessed a technological revolution that has delivered more-realistic experiences in their own homes through high-definition, stereoscopic televisions and natural, gesture-based video game consoles. Although these experiences are more realistic, offering higher levels of fidelity, it is not clear how the increased display and interaction aspects of fidelity impact the user experience. Since immersive virtual reality (VR) allows us to achieve very high levels of fidelity, we designed and conducted a study that used a six-sided CAVE to evaluate display fidelity and interaction fidelity independently, at extremely high and low levels, for a VR first-person shooter (FPS) game. Our goal was to gain a better understanding of the effects of fidelity on the user in a complex, performance-intensive context. The results of our study indicate that both display and interaction fidelity significantly affect strategy and performance, as well as subjective judgments of presence, engagement, and usability. In particular, performance results were strongly in favor of two conditions: low-display, low-interaction fidelity (representative of traditional FPS games) and high-display, high-interaction fidelity (similar to the real world).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent years, consumers have witnessed a technological revolution that has delivered more-realistic experiences in their own homes through high-definition, stereoscopic televisions and natural, gesture-based video game consoles. Although these experiences are more realistic, offering higher levels of fidelity, it is not clear how the increased display and interaction aspects of fidelity impact the user experience. Since immersive virtual reality (VR) allows us to achieve very high levels of fidelity, we designed and conducted a study that used a six-sided CAVE to evaluate display fidelity and interaction fidelity independently, at extremely high and low levels, for a VR first-person shooter (FPS) game. Our goal was to gain a better understanding of the effects of fidelity on the user in a complex, performance-intensive context. The results of our study indicate that both display and interaction fidelity significantly affect strategy and performance, as well as subjective judgments of presence, engagement, and usability. In particular, performance results were strongly in favor of two conditions: low-display, low-interaction fidelity (representative of traditional FPS games) and high-display, high-interaction fidelity (similar to the real world).",
"title": "Evaluating Display Fidelity and Interaction Fidelity in a Virtual Reality Game",
"normalizedTitle": "Evaluating Display Fidelity and Interaction Fidelity in a Virtual Reality Game",
"fno": "ttg2012040626",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Computer Games",
"Display Instrumentation",
"Human Computer Interaction",
"High Interaction Fidelity Condition",
"Display Fidelity Evaluation",
"Interaction Fidelity Evaluation",
"Virtual Reality Game",
"User Experience",
"Immersive Virtual Reality",
"Six Sided CAVE",
"VR First Person Shooter Game",
"Performance Intensive Context",
"Subjective Presence Judgement",
"Subjective Engagement Judgement",
"Subjective Usability Judgement",
"Low Display Condition",
"Low Interaction Fidelity Condition",
"High Display Condition",
"Games",
"Mice",
"Turning",
"Humans",
"Keyboards",
"Usability",
"Accuracy",
"Engagement",
"Virtual Reality",
"Display Fidelity",
"Interaction Fidelity",
"Presence"
],
"authors": [
{
"givenName": "R. P.",
"surname": "McMahan",
"fullName": "R. P. McMahan",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "D. A.",
"surname": "Bowman",
"fullName": "D. A. Bowman",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "D. J.",
"surname": "Zielinski",
"fullName": "D. J. Zielinski",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "R. B.",
"surname": "Brady",
"fullName": "R. B. Brady",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "626-633",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2007/0907/0/04142840",
"title": "Exploring 3D Interaction in Alternate Control-Display Space Mappings",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2007/04142840/12OmNBubOUh",
"parentPublication": {
"id": "proceedings/3dui/2007/0907/0",
"title": "2007 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2010/6846/0/05444727",
"title": "Evaluating natural interaction techniques in video games",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2010/05444727/12OmNx3Zjcm",
"parentPublication": {
"id": "proceedings/3dui/2010/6846/0",
"title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nfc/2009/3577/0/3577a036",
"title": "Exploring Expressive NFC-Based Mobile Phone Interaction with Large Dynamic Displays",
"doi": null,
"abstractUrl": "/proceedings-article/nfc/2009/3577a036/12OmNxXUhTH",
"parentPublication": {
"id": "proceedings/nfc/2009/3577/0",
"title": "Near Field Communication, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2007/0907/0/04142839",
"title": "An Exploration of Interaction-Display Offset in Surround Screen Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2007/04142839/12OmNxeM49A",
"parentPublication": {
"id": "proceedings/3dui/2007/0907/0",
"title": "2007 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2008/3109/0/3109a763",
"title": "PDA -- Remote Display Interaction Framework",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2008/3109a763/12OmNyL0TrV",
"parentPublication": {
"id": "proceedings/cisis/2008/3109/0",
"title": "2008 International Conference on Complex, Intelligent and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuc/2008/3433/0/3433a281",
"title": "Inertial Force Display - Concept and Implementation",
"doi": null,
"abstractUrl": "/proceedings-article/isuc/2008/3433a281/12OmNz2kqm0",
"parentPublication": {
"id": "proceedings/isuc/2008/3433/0",
"title": "2008 Second International Symposium on Universal Communication",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2010/02/mpc2010020072",
"title": "Human-Display Interaction Technology: Emerging Remote Interfaces for Pervasive Display Environments",
"doi": null,
"abstractUrl": "/magazine/pc/2010/02/mpc2010020072/13rRUxBa53l",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2009/02/mpc2009020014",
"title": "Smart Phone Interaction with Registered Displays",
"doi": null,
"abstractUrl": "/magazine/pc/2009/02/mpc2009020014/13rRUyhaIm1",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wslm/2022/0819/0/081900a038",
"title": "Design of Radar Display System Based on Human-computer Interaction Interface",
"doi": null,
"abstractUrl": "/proceedings-article/wslm/2022/081900a038/1KBdRVssvBe",
"parentPublication": {
"id": "proceedings/wslm/2022/0819/0",
"title": "2022 International Conference on Wearables, Sports and Lifestyle Management (WSLM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a274",
"title": "Efficiency Group Interaction Between Participants and Large Display",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a274/1ap5wIzl6i4",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040617",
"articleId": "13rRUyfKIHI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040634",
"articleId": "13rRUyYSWsS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYSWsS",
"doi": "10.1109/TVCG.2012.40",
"abstract": "3D object selection is more demanding when, 1) objects densly surround the target object, 2) the target object is significantly occluded, and 3) when the target object is dynamically changing location. Most 3D selection techniques and guidelines were developed and tested on static or mostly sparse environments. In contrast, games tend to incorporate densly packed and dynamic objects as part of their typical interaction. With the increasing popularity of 3D selection in games using hand gestures or motion controllers, our current understanding of 3D selection needs revision. We present a study that compared four different selection techniques under five different scenarios based on varying object density and motion dynamics. We utilized two existing techniques, Raycasting and SQUAD, and developed two variations of them, Zoom and Expand, using iterative design. Our results indicate that while Raycasting and SQUAD both have weaknesses in terms of speed and accuracy in dense and dynamic environments, by making small modifications to them (i.e., flavoring), we can achieve significant performance increases.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D object selection is more demanding when, 1) objects densly surround the target object, 2) the target object is significantly occluded, and 3) when the target object is dynamically changing location. Most 3D selection techniques and guidelines were developed and tested on static or mostly sparse environments. In contrast, games tend to incorporate densly packed and dynamic objects as part of their typical interaction. With the increasing popularity of 3D selection in games using hand gestures or motion controllers, our current understanding of 3D selection needs revision. We present a study that compared four different selection techniques under five different scenarios based on varying object density and motion dynamics. We utilized two existing techniques, Raycasting and SQUAD, and developed two variations of them, Zoom and Expand, using iterative design. Our results indicate that while Raycasting and SQUAD both have weaknesses in terms of speed and accuracy in dense and dynamic environments, by making small modifications to them (i.e., flavoring), we can achieve significant performance increases.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D object selection is more demanding when, 1) objects densly surround the target object, 2) the target object is significantly occluded, and 3) when the target object is dynamically changing location. Most 3D selection techniques and guidelines were developed and tested on static or mostly sparse environments. In contrast, games tend to incorporate densly packed and dynamic objects as part of their typical interaction. With the increasing popularity of 3D selection in games using hand gestures or motion controllers, our current understanding of 3D selection needs revision. We present a study that compared four different selection techniques under five different scenarios based on varying object density and motion dynamics. We utilized two existing techniques, Raycasting and SQUAD, and developed two variations of them, Zoom and Expand, using iterative design. Our results indicate that while Raycasting and SQUAD both have weaknesses in terms of speed and accuracy in dense and dynamic environments, by making small modifications to them (i.e., flavoring), we can achieve significant performance increases.",
"title": "Dense and Dynamic 3D Selection for Game-Based Virtual Environments",
"normalizedTitle": "Dense and Dynamic 3D Selection for Game-Based Virtual Environments",
"fno": "ttg2012040634",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Computer Games",
"Iterative Methods",
"User Interfaces",
"Iterative Design",
"Dense 3 D Object Selection",
"Dynamic 3 D Object Selection",
"Game Based Virtual Environment",
"3 D Selection Techniques",
"3 D Selection Guidelines",
"Sparse Environment",
"Hand Gesture",
"Motion Controller",
"Object Density",
"Motion Dynamics",
"Raycasting Technique",
"SQUAD Technique",
"Zoom Variation",
"Expand Variation",
"Three Dimensional Displays",
"Games",
"Guidelines",
"Accuracy",
"Usability",
"Context",
"Color",
"Dense And Dynamic Objects",
"Interaction Techniques",
"Game Based Virtual Environments",
"3 D Object Selection"
],
"authors": [
{
"givenName": "J.",
"surname": "Cashion",
"fullName": "J. Cashion",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "C.",
"surname": "Wingrave",
"fullName": "C. Wingrave",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "J. J.",
"surname": "LaViola",
"fullName": "J. J. LaViola",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "634-642",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2011/0063/0/05759219",
"title": "Rapid and accurate 3D selection by progressive refinement",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2011/05759219/12OmNB1wkHF",
"parentPublication": {
"id": "proceedings/3dui/2011/0063/0",
"title": "2011 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismse/2004/2217/0/22170514",
"title": "Automatic Selection and Combination of Descriptors for Effective 3D Similarity Search",
"doi": null,
"abstractUrl": "/proceedings-article/ismse/2004/22170514/12OmNBC8Atb",
"parentPublication": {
"id": "proceedings/ismse/2004/2217/0",
"title": "Multimedia Software Engineering, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2006/0225/0/02250053",
"title": "Using the Non-Dominant Hand for Selection in 3D",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2006/02250053/12OmNBZpH8h",
"parentPublication": {
"id": "proceedings/3dui/2006/0225/0",
"title": "3D User Interfaces (3DUI'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2007/0907/0/04142849",
"title": "Balloon Selection: A Multi-Finger Technique for Accurate Low-Fatigue 3D Selection",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2007/04142849/12OmNBp52GJ",
"parentPublication": {
"id": "proceedings/3dui/2007/0907/0",
"title": "2007 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444783",
"title": "GPU implementation of 3D object selection by conic volume techniques in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444783/12OmNBp52w1",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sccc/2010/4400/0/4400a218",
"title": "Selection Techniques for Dense and Occluded Virtual 3D Environments, Supported by Depth Feedback: Double, Bound and Depth Bubble Cursors",
"doi": null,
"abstractUrl": "/proceedings-article/sccc/2010/4400a218/12OmNrAdsv3",
"parentPublication": {
"id": "proceedings/sccc/2010/4400/0",
"title": "2010 XXIX International Conference of the Chilean Computer Science Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2006/0225/0/02250095",
"title": "Group Selection Techniques for Efficient 3D Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2006/02250095/12OmNs5rkQt",
"parentPublication": {
"id": "proceedings/3dui/2006/0225/0",
"title": "3D User Interfaces (3DUI'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2006/0225/0/02250103",
"title": "Towards a General Model for Selection in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2006/02250103/12OmNwF0BWC",
"parentPublication": {
"id": "proceedings/3dui/2006/0225/0",
"title": "3D User Interfaces (3DUI'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2007/0907/0/04142854",
"title": "Exploring the Effects of Environment Density and Target Visibility on Object Selection in 3D Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2007/04142854/12OmNwK7o7o",
"parentPublication": {
"id": "proceedings/3dui/2007/0907/0",
"title": "2007 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122245",
"title": "Efficient Structure-Aware Selection Techniques for 3D Point Cloud Visualizations with 2DOF Input",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122245/13rRUxASupy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040626",
"articleId": "13rRUwIF6dN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040643",
"articleId": "13rRUwjGoFW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwjGoFW",
"doi": "10.1109/TVCG.2012.56",
"abstract": "Depth camera such as Microsoft Kinect, is much cheaper than conventional 3D scanning devices, and thus it can be acquired for everyday users easily. However, the depth data captured by Kinect over a certain distance is of extreme low quality. In this paper, we present a novel scanning system for capturing 3D full human body models by using multiple Kinects. To avoid the interference phenomena, we use two Kinects to capture the upper part and lower part of a human body respectively without overlapping region. A third Kinect is used to capture the middle part of the human body from the opposite direction. We propose a practical approach for registering the various body parts of different views under non-rigid deformation. First, a rough mesh template is constructed and used to deform successive frames pairwisely. Second, global alignment is performed to distribute errors in the deformation space, which can solve the loop closure problem efficiently. Misalignment caused by complex occlusion can also be handled reasonably by our global alignment algorithm. The experimental results have shown the efficiency and applicability of our system. Our system obtains impressive results in a few minutes with low price devices, thus is practically useful for generating personalized avatars for everyday users. Our system has been used for 3D human animation and virtual try on, and can further facilitate a range of home-oriented virtual reality (VR) applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Depth camera such as Microsoft Kinect, is much cheaper than conventional 3D scanning devices, and thus it can be acquired for everyday users easily. However, the depth data captured by Kinect over a certain distance is of extreme low quality. In this paper, we present a novel scanning system for capturing 3D full human body models by using multiple Kinects. To avoid the interference phenomena, we use two Kinects to capture the upper part and lower part of a human body respectively without overlapping region. A third Kinect is used to capture the middle part of the human body from the opposite direction. We propose a practical approach for registering the various body parts of different views under non-rigid deformation. First, a rough mesh template is constructed and used to deform successive frames pairwisely. Second, global alignment is performed to distribute errors in the deformation space, which can solve the loop closure problem efficiently. Misalignment caused by complex occlusion can also be handled reasonably by our global alignment algorithm. The experimental results have shown the efficiency and applicability of our system. Our system obtains impressive results in a few minutes with low price devices, thus is practically useful for generating personalized avatars for everyday users. Our system has been used for 3D human animation and virtual try on, and can further facilitate a range of home-oriented virtual reality (VR) applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Depth camera such as Microsoft Kinect, is much cheaper than conventional 3D scanning devices, and thus it can be acquired for everyday users easily. However, the depth data captured by Kinect over a certain distance is of extreme low quality. In this paper, we present a novel scanning system for capturing 3D full human body models by using multiple Kinects. To avoid the interference phenomena, we use two Kinects to capture the upper part and lower part of a human body respectively without overlapping region. A third Kinect is used to capture the middle part of the human body from the opposite direction. We propose a practical approach for registering the various body parts of different views under non-rigid deformation. First, a rough mesh template is constructed and used to deform successive frames pairwisely. Second, global alignment is performed to distribute errors in the deformation space, which can solve the loop closure problem efficiently. Misalignment caused by complex occlusion can also be handled reasonably by our global alignment algorithm. The experimental results have shown the efficiency and applicability of our system. Our system obtains impressive results in a few minutes with low price devices, thus is practically useful for generating personalized avatars for everyday users. Our system has been used for 3D human animation and virtual try on, and can further facilitate a range of home-oriented virtual reality (VR) applications.",
"title": "Scanning 3D Full Human Bodies Using Kinects",
"normalizedTitle": "Scanning 3D Full Human Bodies Using Kinects",
"fno": "ttg2012040643",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Solid Modelling",
"Avatars",
"Computer Animation",
"Interactive Devices",
"Home Oriented Virtual Reality Applications",
"3 D Full Human Body Model Scanning",
"Microsoft Kinect",
"3 D Scanning Devices",
"Depth Camera",
"Nonrigid Deformation",
"Rough Mesh Template",
"Successive Frame Deformation",
"Error Distribution",
"Loop Closure Problem",
"Global Alignment Algorithm",
"Personalized Avatars",
"3 D Human Animation",
"Three Dimensional Displays",
"Biological System Modeling",
"Image Reconstruction",
"Shape",
"Humans",
"Computational Modeling",
"Geometry",
"Microsoft Kinect",
"3 D Body Scanning",
"Global Non Igid Registration"
],
"authors": [
{
"givenName": null,
"surname": "Jing Tong",
"fullName": "Jing Tong",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Jin Zhou",
"fullName": "Jin Zhou",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Ligang Liu",
"fullName": "Ligang Liu",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Zhigeng Pan",
"fullName": "Zhigeng Pan",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Hao Yan",
"fullName": "Hao Yan",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "643-650",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a432",
"title": "Accurate Full Body Scanning from a Single Fixed 3D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a432/12OmNBOCWs8",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/1997/7943/0/79430266",
"title": "Optimal Postures and Positioning for Human Body Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/1997/79430266/12OmNBdJ5hx",
"parentPublication": {
"id": "proceedings/3dim/1997/7943/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2008/3381/0/3381a335",
"title": "Automatic Surface Scanning of 3D Artifacts",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2008/3381a335/12OmNvRU0nM",
"parentPublication": {
"id": "proceedings/cw/2008/3381/0",
"title": "2008 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a192",
"title": "Markerless Motion Capture of Human Body Using PSO with Single Depth Camera",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a192/12OmNvqEvJl",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995321",
"title": "Structured light 3D scanning in the presence of global illumination",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995321/12OmNwBjP7F",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbmi/2011/4623/0/4623a156",
"title": "3D Ultrasound Data Acqusition System Based on Back End Scan Mode",
"doi": null,
"abstractUrl": "/proceedings-article/icbmi/2011/4623a156/12OmNy3RRDW",
"parentPublication": {
"id": "proceedings/icbmi/2011/4623/0",
"title": "Intelligent Computation and Bio-Medical Instrumentation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a097",
"title": "A Study on Natural Interaction for Human Body Motion Using Depth Image Data",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a097/12OmNznCkZN",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icic/2010/4047/2/4047b201",
"title": "Fitting the Sectional Curve for 3D Human Body Based on Non-uniform B-spline",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2010/4047b201/12OmNzw8jgm",
"parentPublication": {
"id": "proceedings/icic/2010/4047/1",
"title": "2010 Third International Conference on Information and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/05/ttp2013051039",
"title": "Algorithms for 3D Shape Scanning with a Depth Camera",
"doi": null,
"abstractUrl": "/journal/tp/2013/05/ttp2013051039/13rRUxNW1UZ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2007/07/r7028",
"title": "3D Body Scanning and Healthcare Applications",
"doi": null,
"abstractUrl": "/magazine/co/2007/07/r7028/13rRUypGGeq",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040634",
"articleId": "13rRUyYSWsS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012040651",
"articleId": "13rRUxAATgv",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXnFu4",
"name": "ttg2012040643s1.mov",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2012040643s1.mov",
"extension": "mov",
"size": "35 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxAATgv",
"doi": "10.1109/TVCG.2012.48",
"abstract": "We present a prototype system for interactive construction and modification of 3D physical models using building blocks.Our system uses a depth sensing camera and a novel algorithm for acquiring and tracking the physical models. The algorithm,Lattice-First, is based on the fact that building block structures can be arranged in a 3D point lattice where the smallest block unit is a basis in which to derive all the pieces of the model. The algorithm also makes it possible for users to interact naturally with the physical model as it is acquired, using their bare hands to add and remove pieces. We present the details of our algorithm, along with examples of the models we can acquire using the interactive system. We also show the results of an experiment where participants modify a block structure in the absence of visual feedback. Finally, we discuss two proof-of-concept applications: a collaborative guided assembly system where one user is interactively guided to build a structure based on another user's design, and a game where the player must build a structure that matches an on-screen silhouette.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a prototype system for interactive construction and modification of 3D physical models using building blocks.Our system uses a depth sensing camera and a novel algorithm for acquiring and tracking the physical models. The algorithm,Lattice-First, is based on the fact that building block structures can be arranged in a 3D point lattice where the smallest block unit is a basis in which to derive all the pieces of the model. The algorithm also makes it possible for users to interact naturally with the physical model as it is acquired, using their bare hands to add and remove pieces. We present the details of our algorithm, along with examples of the models we can acquire using the interactive system. We also show the results of an experiment where participants modify a block structure in the absence of visual feedback. Finally, we discuss two proof-of-concept applications: a collaborative guided assembly system where one user is interactively guided to build a structure based on another user's design, and a game where the player must build a structure that matches an on-screen silhouette.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a prototype system for interactive construction and modification of 3D physical models using building blocks.Our system uses a depth sensing camera and a novel algorithm for acquiring and tracking the physical models. The algorithm,Lattice-First, is based on the fact that building block structures can be arranged in a 3D point lattice where the smallest block unit is a basis in which to derive all the pieces of the model. The algorithm also makes it possible for users to interact naturally with the physical model as it is acquired, using their bare hands to add and remove pieces. We present the details of our algorithm, along with examples of the models we can acquire using the interactive system. We also show the results of an experiment where participants modify a block structure in the absence of visual feedback. Finally, we discuss two proof-of-concept applications: a collaborative guided assembly system where one user is interactively guided to build a structure based on another user's design, and a game where the player must build a structure that matches an on-screen silhouette.",
"title": "Interactive 3D Model Acquisition and Tracking of Building Block Structures",
"normalizedTitle": "Interactive 3D Model Acquisition and Tracking of Building Block Structures",
"fno": "ttg2012040651",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"User Interfaces",
"Cameras",
"Data Acquisition",
"Solid Modelling",
"On Screen Silhouette",
"Interactive 3 D Model",
"3 D Model Acquisition",
"3 D Model Tracking",
"Building Block Structure",
"Interactive Construction",
"Interactive Modification",
"3 D Physical Model",
"Depth Sensing Camera",
"Lattice First Algorithm",
"3 D Point Lattice",
"Visual Feedback",
"User Interaction",
"Collaborative Guided Assembly System",
"User Design",
"Solid Modeling",
"Lattices",
"Three Dimensional Displays",
"Computational Modeling",
"Cameras",
"Image Color Analysis",
"Visualization",
"Building Block Structures",
"Interactive Physical Model Building",
"3 D Model Acquisition",
"Object Tracking",
"Depth Cameras"
],
"authors": [
{
"givenName": "A.",
"surname": "Miller",
"fullName": "A. Miller",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "B.",
"surname": "White",
"fullName": "B. White",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "E.",
"surname": "Charbonneau",
"fullName": "E. Charbonneau",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Z.",
"surname": "Kanzler",
"fullName": "Z. Kanzler",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "J. J.",
"surname": "LaViola",
"fullName": "J. J. LaViola",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "651-659",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/1999/0210/0/02100340",
"title": "Towards Interactive Finite Element Analysis of Shell Structures in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100340/12OmNAIvd0i",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gmp/2000/0562/0/05620033",
"title": "Rapid 3D Model Acquisition from Images of Small Objects",
"doi": null,
"abstractUrl": "/proceedings-article/gmp/2000/05620033/12OmNBbaH9z",
"parentPublication": {
"id": "proceedings/gmp/2000/0562/0",
"title": "Geometric Modeling and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccps/2012/4695/0/4695a223",
"title": "Demo Abstract: Towards a Wireless Building Management System with Minimum Change to the Building Protocols",
"doi": null,
"abstractUrl": "/proceedings-article/iccps/2012/4695a223/12OmNButpWP",
"parentPublication": {
"id": "proceedings/iccps/2012/4695/0",
"title": "Cyber-Physical Systems, IEEE/ACM International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2008/3494/2/3494b654",
"title": "Analysis on Interactive Structure of Knowledge Acquisition",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2008/3494b654/12OmNwO5LU2",
"parentPublication": {
"id": "proceedings/isise/2008/3494/2",
"title": "2008 International Symposium on Information Science and Engineering (ISISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2008/3391/0/3391a495",
"title": "Automatic Texture Acquisition for 3D Model Using Oblique Aerial Images",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2008/3391a495/12OmNyYDDMj",
"parentPublication": {
"id": "proceedings/icinis/2008/3391/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icm/2011/4522/2/4522b034",
"title": "Component Warehouse Driven by Workflow Based on Building Block",
"doi": null,
"abstractUrl": "/proceedings-article/icm/2011/4522b034/12OmNyaoDx2",
"parentPublication": {
"id": "proceedings/icm/2011/4522/2",
"title": "Information Technology, Computer Engineering and Management Sciences, International Conference of",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itcs/2009/3688/1/3688a613",
"title": "Implementation of the Interactive Gestures of Virtual Avatar Based on a Multi-user Virtual Learning Environment",
"doi": null,
"abstractUrl": "/proceedings-article/itcs/2009/3688a613/12OmNz4Bdnz",
"parentPublication": {
"id": "proceedings/itcs/2009/3688/1",
"title": "Information Technology and Computer Science, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/biomedcom/2012/4938/0/4938a132",
"title": "Introducing Security Building Block Models",
"doi": null,
"abstractUrl": "/proceedings-article/biomedcom/2012/4938a132/12OmNzSQdmV",
"parentPublication": {
"id": "proceedings/biomedcom/2012/4938/0",
"title": "ASE/IEEE International Conference on BioMedical Computing (BioMedCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1997/7822/0/78221074",
"title": "Hyper-patches for 3D model acquisition and tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1997/78221074/12OmNzSh18X",
"parentPublication": {
"id": "proceedings/cvpr/1997/7822/0",
"title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1996/02/mcg1996020031",
"title": "Put: Language-Based Interactive Manipulation of Objects",
"doi": null,
"abstractUrl": "/magazine/cg/1996/02/mcg1996020031/13rRUx0xPpl",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040643",
"articleId": "13rRUwjGoFW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg20120400xv",
"articleId": "13rRUyeTVhY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyaoDzi",
"title": "April",
"year": "2012",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyeTVhY",
"doi": "10.1109/TVCG.2012.36",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Author Index",
"normalizedTitle": "Author Index",
"fno": "ttg20120400xv",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "xv-xv",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2012040651",
"articleId": "13rRUxAATgv",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNApu5xD",
"title": "October-December",
"year": "2002",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "8",
"label": "October-December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwbJD4D",
"doi": "10.1109/TVCG.2002.1044517",
"abstract": "Abstract—Image databases are nowadays widely exploited in a number of different contexts, ranging from history of art, through medicine, to education. Existing querying paradigms are based either on the usage of textual strings, for high-level semantic queries or on 2D visual examples for the expression of perceptual queries. Semantic queries require manual annotation of the database images. Instead, perceptual queries only require that image analysis is performed on the database images in order to extract salient perceptual features that are matched with those of the example. However, usage of 2D examples is generally inadequate as effective authoring of query images, attaining a realistic reproduction of complex scenes, needs manual editing and sketching ability. Investigation of new querying paradigms is therefore an important—yet still marginally investigated—factor for the success of content-based image retrieval. In this paper, a novel querying paradigm is presented which is based on usage of 3D interfaces exploiting navigation and editing of 3D virtual environments. Query images are obtained by taking a snapshot of the framed environment and by using the snapshot as an example to retrieve similar database images. A comparative analysis is carried out between the usage of 3D and 2D interfaces and their related query paradigms. This analysis develops on a user test on retrieval efficiency and effectiveness, as well as on an evaluation of users' satisfaction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Image databases are nowadays widely exploited in a number of different contexts, ranging from history of art, through medicine, to education. Existing querying paradigms are based either on the usage of textual strings, for high-level semantic queries or on 2D visual examples for the expression of perceptual queries. Semantic queries require manual annotation of the database images. Instead, perceptual queries only require that image analysis is performed on the database images in order to extract salient perceptual features that are matched with those of the example. However, usage of 2D examples is generally inadequate as effective authoring of query images, attaining a realistic reproduction of complex scenes, needs manual editing and sketching ability. Investigation of new querying paradigms is therefore an important—yet still marginally investigated—factor for the success of content-based image retrieval. In this paper, a novel querying paradigm is presented which is based on usage of 3D interfaces exploiting navigation and editing of 3D virtual environments. Query images are obtained by taking a snapshot of the framed environment and by using the snapshot as an example to retrieve similar database images. A comparative analysis is carried out between the usage of 3D and 2D interfaces and their related query paradigms. This analysis develops on a user test on retrieval efficiency and effectiveness, as well as on an evaluation of users' satisfaction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Image databases are nowadays widely exploited in a number of different contexts, ranging from history of art, through medicine, to education. Existing querying paradigms are based either on the usage of textual strings, for high-level semantic queries or on 2D visual examples for the expression of perceptual queries. Semantic queries require manual annotation of the database images. Instead, perceptual queries only require that image analysis is performed on the database images in order to extract salient perceptual features that are matched with those of the example. However, usage of 2D examples is generally inadequate as effective authoring of query images, attaining a realistic reproduction of complex scenes, needs manual editing and sketching ability. Investigation of new querying paradigms is therefore an important—yet still marginally investigated—factor for the success of content-based image retrieval. In this paper, a novel querying paradigm is presented which is based on usage of 3D interfaces exploiting navigation and editing of 3D virtual environments. Query images are obtained by taking a snapshot of the framed environment and by using the snapshot as an example to retrieve similar database images. A comparative analysis is carried out between the usage of 3D and 2D interfaces and their related query paradigms. This analysis develops on a user test on retrieval efficiency and effectiveness, as well as on an evaluation of users' satisfaction.",
"title": "Three-Dimensional Interfaces for Querying by Example in Content-Based Image Retrieval",
"normalizedTitle": "Three-Dimensional Interfaces for Querying by Example in Content-Based Image Retrieval",
"fno": "v0305",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Content Based Image Retrieval",
"3 D User Interfaces"
],
"authors": [
{
"givenName": "Jürgen",
"surname": "Assfalg",
"fullName": "Jürgen Assfalg",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alberto",
"surname": "Del Bimbo",
"fullName": "Alberto Del Bimbo",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pietro",
"surname": "Pala",
"fullName": "Pietro Pala",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2002-10-01 00:00:00",
"pubType": "trans",
"pages": "305-318",
"year": "2002",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "v0319",
"articleId": "13rRUwbaqUF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNApu5xD",
"title": "October-December",
"year": "2002",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "8",
"label": "October-December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwbaqUF",
"doi": "10.1109/TVCG.2002.1044518",
"abstract": "Abstract—A new method for the visualization of state transition systems is presented. Visual information is reduced by clustering nodes, forming a tree structure of related clusters. This structure is visualized in three dimensions with concepts from cone trees and emphasis on symmetry. A number of interactive options are provided as well, allowing the user to superimpose detail information on this tree structure. The resulting visualization enables the user to relate features in the visualization of the state transition graph to semantic concepts in the corresponding process and vice versa.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—A new method for the visualization of state transition systems is presented. Visual information is reduced by clustering nodes, forming a tree structure of related clusters. This structure is visualized in three dimensions with concepts from cone trees and emphasis on symmetry. A number of interactive options are provided as well, allowing the user to superimpose detail information on this tree structure. The resulting visualization enables the user to relate features in the visualization of the state transition graph to semantic concepts in the corresponding process and vice versa.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—A new method for the visualization of state transition systems is presented. Visual information is reduced by clustering nodes, forming a tree structure of related clusters. This structure is visualized in three dimensions with concepts from cone trees and emphasis on symmetry. A number of interactive options are provided as well, allowing the user to superimpose detail information on this tree structure. The resulting visualization enables the user to relate features in the visualization of the state transition graph to semantic concepts in the corresponding process and vice versa.",
"title": "Interactive Visualization of State Transition Systems",
"normalizedTitle": "Interactive Visualization of State Transition Systems",
"fno": "v0319",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Graph Visualization",
"Transition Systems",
"State Spaces",
"Cone Trees"
],
"authors": [
{
"givenName": "Frank",
"surname": "van Ham",
"fullName": "Frank van Ham",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huub",
"surname": "van de Wetering",
"fullName": "Huub van de Wetering",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jarke J.",
"surname": "van Wijk",
"fullName": "Jarke J. van Wijk",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2002-10-01 00:00:00",
"pubType": "trans",
"pages": "319-329",
"year": "2002",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0305",
"articleId": "13rRUwbJD4D",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0330",
"articleId": "13rRUyfKIHy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |