@inproceedings{Diverdi,
abstract = {Anywhere augmentation pursues the goal of lowering the initial investment of time and money necessary to participate in mixed reality work, bridging the gap between researchers in the field and regular computer users. Our paper contributes to this goal by introducing the GroundCam, a cheap tracking modality with no significant setup necessary. By itself, the GroundCam provides high frequency, high resolution relative position information similar to an inertial navigation system, but with significantly less drift. When coupled with a wide area tracking modality via a complementary Kalman filter, the hybrid tracker becomes a powerful base for indoor and outdoor mobile mixed reality work},
address = {Charlotte, NC},
author = {DiVerdi, Stephen and H\"{o}llerer, Tobias},
booktitle = {VR '07: Proceedings of the 2007 Virtual Reality Conference},
doi = {10.1109/VR.2007.352466},
file = {:D$\backslash$:/\_Docs/mendeleyMain/GroundCam A Tracking Modality for Mobile Mixed Reality - DiVerdi, Hollerer - 2007.pdf:pdf},
isbn = {1-4244-0905-5},
keywords = {anywhere augmentation,mobile mixed reality,tracker fusion,vision-based tracking},
pages = {75--82},
publisher = {IEEE Computer Society},
title = {{GroundCam: A Tracking Modality for Mobile Mixed Reality}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4161008},
year = {2007}
}
@comment{Peternier2006: EGVE '06 entry has no publisher/address fields; presumably Eurographics Association - verify before adding. Other fields follow this file's conventions.}
@inproceedings{Peternier2006,
abstract = {We have designed a wearable Mixed Reality (MR) framework which allows to real-time render game-like 3D scenes on see-through head-mounted displays (see through HMDs) and to localize the user position within a known internet wireless area. Our equipment weights less than 1 Pound (0.45 Kilos). The information visualized on the mobile device could be sent on-demand from a remote server and realtime rendered onboard.We present our PDA-based platform as a valid alternative to use in wearable MR contexts under less mobility and encumbering constraints: our approach eliminates the typical backpack with a laptop, a GPS antenna and a heavy HMD usually required in this cases. A discussion about our results and user experiences with our approach using a handheld for 3D rendering is presented as well.},
author = {Peternier, Achille and Vexo, Fr\'{e}d\'{e}ric and Thalmann, Daniel},
booktitle = {EGVE '06: Proceedings of the 12th Eurographics Symposium on Virtual Environments},
editor = {Hubbold, Roger and Lin, Ming},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Wearable Mixed Reality System In Less Than 1 Pound - Peternier, Vexo, Thalmann - 2006.pdf:pdf},
pages = {35--44},
title = {{Wearable Mixed Reality System In Less Than 1 Pound}},
year = {2006}
}
@article{Rodriguez2004,
abstract = {Hospital workers are highly mobile; they are constantly changing location to perform their daily work, which includes visiting patients, locating resources, such as medical records, or consulting with other specialists. The information required by these specialists is highly dependent on their location. Access to a patient's laboratory results might be more relevant when the physician is near the patient's bed and not elsewhere. We describe a location-aware medical information system that was developed to provide access to resources such as patient's records or the location of a medical specialist, based on the user's location. The system is based on a handheld computer which includes a trained backpropagation neural-network used to estimate the user's location and a client to access information from the hospital information system that is relevant to the user's current location.},
author = {Rodr\'{\i}guez, Marcela D. and Favela, Jesus and Mart\'{\i}nez, Edgar A. and Mu\~{n}oz, Miguel A.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Location-aware access to hospital information and services. - Rodr\'{\i}guez et al. - 2004.pdf:pdf},
issn = {1089-7771},
journal = {IEEE Transactions on Information Technology in Biomedicine},
keywords = {Algorithms,Computer Communication Networks,Computers, Handheld,Database Management Systems,Geographic Information Systems,Information Storage and Retrieval,Medical Records Systems, Computerized,Neural Networks (Computer),Telemedicine,Telemedicine: methods,User-Computer Interface},
month = dec,
number = {4},
pages = {448--455},
pmid = {15615035},
title = {{Location-aware access to hospital information and services}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/15615035},
volume = {8},
year = {2004}
}
@inproceedings{Schmeil2001,
address = {New York, New York, USA},
annote = {NOTE(review): citation key says 2001 but every field identifies this as SIGGRAPH 2006 Sketches; key left unchanged because renaming would break existing citations.},
author = {Schmeil, Andreas and Broll, Wolfgang},
booktitle = {ACM SIGGRAPH 2006 Sketches},
doi = {10.1145/1179849.1180025},
file = {:D$\backslash$:/\_Docs/mendeleyMain/MARA An Augmented Personal Assistant and Companion - Schmeil, Broll - 2006.pdf:pdf},
isbn = {1595933646},
pages = {141},
publisher = {ACM Press},
title = {{MARA: An Augmented Personal Assistant and Companion}},
url = {http://portal.acm.org/citation.cfm?doid=1179849.1180025},
year = {2006}
}
@inproceedings{Cheung,
annote = {NOTE(review): removed keywords field, which contained garbled PDF-extraction fragments rather than real keywords; citation key lacks the year suffix used elsewhere in this file but is left unchanged to avoid breaking citations.},
author = {Cheung, Kenneth C. and Intille, Stephen S. and Larson, Kent},
booktitle = {UbiComp 2006: Proceedings of the 8th International Conference on Ubiquitous Computing},
file = {:D$\backslash$:/\_Docs/mendeleyMain/An Inexpensive Bluetooth-Based Indoor Positioning Hack - Cheung, Intille, Larson - Unknown.pdf:pdf},
title = {{An Inexpensive Bluetooth-Based Indoor Positioning Hack}},
year = {2006}
}
@inproceedings{Rekimoto1995,
author = {Rekimoto, Jun},
booktitle = {ICAT/VRST '95: Proceedings of the International Conference on Artificial Reality and Tele-Existence Conference on Virtual Reality Software and Technology},
file = {:D$\backslash$:/\_Docs/mendeleyMain/The magnifying glass approach to augmented reality systems - Rekimoto - 1995.pdf:pdf},
pages = {123--132},
title = {{The magnifying glass approach to augmented reality systems}},
year = {1995}
}
@inproceedings{Azuma2006,
address = {Santa Barbara, CA},
author = {Azuma, Ronald T. and Neely, Howard and Daily, Mike and Leonard, Jon},
booktitle = {ISMAR '06: Proceedings of the 5th IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2006.297798},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Performance analysis of an outdoor augmented reality tracking system that relies upon a few mobile beacons - Azuma et al. - 2006.pdf:pdf},
isbn = {1-4244-0650-1},
keywords = {monte carlo analysis,simulation,uav},
month = oct,
pages = {101--104},
publisher = {IEEE Computer Society},
title = {{Performance analysis of an outdoor augmented reality tracking system that relies upon a few mobile beacons}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4079260},
year = {2006}
}
@inproceedings{Olwal2006,
abstract = {The vision of spatially aware handheld interaction devices has been hard to realize. The difficulties in solving the general tracking problem for small devices have been addressed by several research groups and examples of issues are performance, hardware availability and platform independency. We present LightSense, an approach that employs commercially available components to achieve robust tracking of cell phone LEDs, without any modifications to the device. Cell phones can thus be promoted to interaction and display devices in ubiquitous installations of systems such as the ones we present here. This could enable a new generation of spatially aware handheld interaction devices that would unobtrusively empower and assist us in our everyday tasks.},
address = {Santa Barbara, CA},
author = {Olwal, Alex},
booktitle = {ISMAR '06: Proceedings of the 5th IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2006.297802},
file = {:D$\backslash$:/\_Docs/mendeleyMain/LightSense Enabling Spatially Aware Handheld Interaction Devices - Olwal - 2006.pdf:pdf},
isbn = {1-4244-0650-1},
keywords = {augmented reality,cell phone,handheld,led,mixed reality,mobile,portable,spatially aware,ubiquitous},
month = oct,
pages = {119--122},
publisher = {IEEE Computer Society},
title = {{LightSense: Enabling Spatially Aware Handheld Interaction Devices}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4079264},
year = {2006}
}
@comment{Vogt2002: author given names are initials only (S., A., F., H.); full names are not recoverable from this record, so they are left as-is rather than guessed.}
@inproceedings{Vogt2002,
abstract = {We have built a system for augmented reality visualization based on a single head mounted tracking camera. The camera includes an infrared illuminator and works in conjunction with a set of retro-reflective markers that are placed around the workspace. This marker frame configuration delivers excellent pose information, which translates to stable, jitter-free augmentation. In this article, we describe using the same single camera system for tracking relatively small marker clusters, which can be used for tool or instrument tracking. Tracking of such a marker cluster is more susceptible to noise compared to tracking of a marker frame, mainly due to its small image coverage. The sensitivity to noise is studied using Monte Carlo simulations and verified in an experimental setup. We achieved jitter-free augmentation with an optimized cluster design.},
address = {Washington, DC, USA},
author = {Vogt, S. and Khamene, A. and Sauer, F. and Niemann, H.},
booktitle = {ISMAR '02: Proceedings of the 1st IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2002.1115082},
isbn = {0-7695-1781-1},
pages = {127--136},
publisher = {IEEE Computer Society},
title = {{Single camera tracking of marker clusters: multiparameter cluster optimization and experimental verification}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1115082},
year = {2002}
}
@inproceedings{Cho1998,
abstract = {In Augmented Reality (AR), a user can see a virtual world as well as a real world. To avoid the registration problem between the virtual world and the real world, the user's pose in both worlds should be exactly the same. Fiducial tracking AR is an attractive approach to the registration problem, but most of the developed fiducial tracking AR systems have very limited tracking ranges and require carefully prepared environments, especially lighting conditions. To provide for wide views and detailed views in large-scale applications, an AR system should have a scalable tracking capability under varying light condition. In this paper, we propose multi-ring color fiducial systems and a light-invariant fiducial detection method for scalable fiducial tracking AR systems. We analyze the optimal ring width, and develop formulas to obtain the optimal fiducial set with system specific inputs. We present a light-invariant circular fiducial detection method that uses relations among fiducials and their backgrounds for segmenting regions of an image. Our work provides a simple and convenient way to achieve wide-area tracking for AR.},
address = {Natick, MA, USA},
author = {Cho, Youngkwan and Lee, Jongweon and Neumann, Ulrich},
booktitle = {IWAR '98: Proceedings of the 1st IEEE and ACM International Workshop on Augmented Reality},
file = {:D$\backslash$:/\_Docs/mendeleyMain/A Multi-ring Color Fiducial System and An Intensity-invariant Detection Method for Scalable Fiducial-Tracking Augmented Reality - Cho, Lee, Neumann - 1998.pdf:pdf},
keywords = {augmented reality,concentric fiducial,edge detection,fiducial-tracking,multi-ring fiducial,region segmentation,rule-based detection},
pages = {1--15},
title = {{A Multi-ring Color Fiducial System and An Intensity-invariant Detection Method for Scalable Fiducial-Tracking Augmented Reality}},
year = {1998}
}
@comment{Newman2001: address is "New York, New York, USA" while publisher is IEEE Computer Society; other IEEE CS entries in this file use Washington, DC. Possible export artifact - verify against the proceedings front matter.}
@inproceedings{Newman2001,
abstract = {Augmented reality (AR) both exposes and supplements the user's view of the real world. Previous AR work has focussed on the close registration of real and virtual objects, which requires very accurate real-time estimates of head position and orientation. Most of these systems have been tethered and restricted to small volumes. In contrast, we have chosen to concentrate on allowing the AR user to roam freely within an entire building. At AT\&T Laboratories Cambridge we provide personnel with AR services using data from an ultrasonic tracking system, called the Bat system, which has been deployed building-wide. We have approached the challenge of implementing a wide-area, in-building AR system in two different ways. The first uses a head-mounted display connected to a laptop, which combines sparse position measurements from the Bat system with more frequent rotational information from an inertial tracker to render annotations and virtual objects that relate to or coexist with the real world. The second uses a PDA to provide a convenient portal with which the user can quickly view the augmented world. These systems can be used to annotate the world in a more-or-less seamless way, allowing a richer interaction with both real and virtual objects},
address = {New York, New York, USA},
author = {Newman, Joseph and Ingram, David and Hopper, Andy},
booktitle = {ISAR '01: Proceedings of the IEEE and ACM International Symposium on Augmented Reality},
doi = {10.1109/ISAR.2001.970517},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Augmented reality in a wide area sentient environment - Newman, Ingram, Hopper - 2001.pdf:pdf},
isbn = {0-7695-1375-1},
pages = {77--86},
publisher = {IEEE Computer Society},
title = {{Augmented reality in a wide area sentient environment}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=970517},
year = {2001}
}
@inproceedings{Klinker2000,
abstract = {Augmented reality (AR) constitutes a new user interface paradigm. Using light head-sets and hand-held or worn computing equipment, users can roam their daily working environment while being continuously in contact with their computer systems. For AR to work properly in a large factory, new system architectures have to be designed with consideration of the special requirements imposed by AR. In particular, AR requires real-time facilities to track the user's position and viewing direction. In the past, various carefully calibrated sensing devices have been used for this purpose, including magnetic trackers and active LED systems. Research is now focusing on computer vision-based methods. It is our hypothesis that, in the future, the most successful indoor approaches will combine local (user-worn) vision-based tracking methods with global user tracking schemes from fixed (wall-mounted) sensors, using mobile wireless networking technology to allow the user-worn system to communicate with the globally available sensing infrastructure in an intelligent building. We propose the concept of ``AR-ready'' intelligent buildings which provide built-in tracking services via different sensing modalities},
address = {Munich, Germany},
author = {Klinker, Gudrun and Reicher, Thomas and Br\"{u}gge, Bernd},
booktitle = {ISAR '00: Proceedings of the IEEE and ACM International Symposium on Augmented Reality},
doi = {10.1109/ISAR.2000.880921},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Distributed user tracking concepts for augmented reality applications - Klinker, Reicher, Br\"{u}gge - 2000.pdf:pdf},
isbn = {0-7695-0846-4},
pages = {37--44},
publisher = {IEEE Computer Society},
title = {{Distributed user tracking concepts for augmented reality applications}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=880921},
year = {2000}
}
@comment{Newman2004: address "Washington, DC" matches the IEEE Computer Society publisher city convention also used by Vogt2002 in this file; file-field filename says "Unknown" year but the entry fields give 2004.}
@inproceedings{Newman2004,
abstract = {Augmented Reality (AR) provides a natural interface to the "calm" pervasive technology anticipated in large-scale Ubiquitous Computing environments. However, the range of classic AR applications has been limited by the scope, range and cost of sensors used for tracking. Hybrid tracking approaches can go some way to extending this range. We propose an approach, called Ubiquitous Tracking, in which data from widespread and diverse heterogeneous tracking sensors is automatically and dynamically fused, and then transparently provided to applications. A formal model represents spatial relationships between objects as a graph attributed with quality-of-service parameters. This paper presents a software implementation, in which a dynamic data flow network of distributed software components is thereby constructed in response to queries and optimisation criteria specified by applications. This implementation is demonstrated using a small laboratory example, and larger setups modelled in a simulation environment.},
address = {Washington, DC},
author = {Newman, Joseph and Wagner, Martin and Bauer, Martin and MacWilliams, Asa and Pintaric, Thomas and Beyer, Dagmar and Pustka, Daniel and Strasser, Franz and Schmalstieg, Dieter and Klinker, Gudrun},
booktitle = {ISMAR '04: Proceedings of the 3rd IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2004.62},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Ubiquitous Tracking for Augmented Reality - Newman et al. - Unknown.pdf:pdf},
isbn = {0-7695-2191-6},
pages = {192--201},
publisher = {IEEE Computer Society},
title = {{Ubiquitous Tracking for Augmented Reality}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1383056},
year = {2004}
}
@techreport{MacWilliams2004a,
annote = {NOTE(review): was typed @phdthesis, but a three-author "Study on ..." cannot be a doctoral thesis; retyped as @techreport with school moved to institution. Citation key unchanged.},
author = {MacWilliams, Asa and Br\"{u}gge, Bernd and Reicher, Thomas},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Study on Software Architectures for Augmented Reality Systems - MacWilliams, Br\"{u}gge, Reicher - 2004.pdf:pdf},
institution = {Technische Universit\"{a}t M\"{u}nchen},
pages = {81},
title = {{Study on Software Architectures for Augmented Reality Systems}},
year = {2004}
}
@comment{Cheok2004: both doi and url are present and the url is merely the publisher's DOI-resolver link, so the doi field is the authoritative identifier; entry otherwise complete for an @article.}
@article{Cheok2004,
abstract = {Human Pacman is a novel interactive entertainment system that ventures to embed the natural physical world seamlessly with a fantasy virtual playground by capitalizing on mobile computing, wireless LAN, ubiquitous computing, and motion-tracking technologies. Our human Pacman research is a physical role-playing augmented-reality computer fantasy together with real human–social and mobile gaming. It emphasizes collaboration and competition between players in a wide outdoor physical area which allows natural wide-area human–physical movements. Pacmen and Ghosts are now real human players in the real world, experiencing mixed computer graphics fantasy–reality provided by using the wearable computers. Virtual cookies and actual tangible physical objects are incorporated into the game play to provide novel experiences of seamless transitions between real and virtual worlds. We believe human Pacman is pioneering a new form of gaming that anchors on physicality, mobility, social interaction, and ubiquitous computing.},
author = {Cheok, Adrian David and Goh, Kok Hwee and Liu, Wei and Farbiz, Farzam and Fong, Siew Wan and Teo, Sze Lee and Li, Yu and Yang, Xubo},
doi = {10.1007/s00779-004-0267-x},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Human Pacman a mobile, wide-area entertainment system based on physical, social, and ubiquitous computing - Cheok et al. - 2004.pdf:pdf},
issn = {1617-4909},
journal = {Personal and Ubiquitous Computing},
month = may,
number = {2},
pages = {71--81},
title = {{Human Pacman: a mobile, wide-area entertainment system based on physical, social, and ubiquitous computing}},
url = {http://www.springerlink.com/openurl.asp?genre=article\&id=doi:10.1007/s00779-004-0267-x},
volume = {8},
year = {2004}
}
@inproceedings{Nakayama2009,
abstract = {In this paper, we present the AI Goggles system, which can instantly describe objects and scenes in the real world and retrieve visual memories about them using keywords input by the users. This is a stand-alone wearable system working on a tiny mobile computer. Also, the system can quickly learn unknown objects and scenes by teaching and learn to label and retrieve them on site, without loss of recognition ability for previously learnt ones. As the core algorithm of the system, we propose and implement a new method of multi labeling and retrieval of unconstrained real-world images. Our method outperforms the current state-of-the-art method, in terms of both accuracy and computation speed on the standard benchmark dataset. This is a major contribution to development of visual and memory assistive man-machine user interface.},
address = {Kelowna, BC},
author = {Nakayama, Hideki and Harada, Tatsuya and Kuniyoshi, Yasuo},
booktitle = {CRV '09: Proceedings of the 2009 Canadian Conference on Computer and Robot Vision},
doi = {10.1109/CRV.2009.9},
isbn = {978-1-4244-4211-9},
month = may,
pages = {184--191},
publisher = {IEEE Computer Society},
title = {{AI Goggles: Real-time Description and Retrieval in the Real World with Online Learning}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5230521},
year = {2009}
}
@comment{ValdivielsoMiranda2010: typed @phdthesis, but the Spanish abstract describes a "proyecto" (final-year project) at Centro Politecnico Superior - possibly a mastersthesis; verify the degree before changing the entry type.}
@phdthesis{ValdivielsoMiranda2010,
abstract = {El proyecto se inicia con la reconstrucci\'{o}n densa de una escena 3D a partir de im\'{a}genes en dos pasos. Con el primero de ellos se obtendr\'{a} la posici\'{o}n 3D de las c\'{a}maras usando la t\'{e}cnica conocida como Bundle Adjustment. En un segundo paso, a partir de estas localizaciones y mediante restricciones proyectivas se densificar\'{a} la reconstrucci\'{o}n 3D de la escena. En esta primera fase del proyecto se desarrollar\'{a} un visor 3D el cual nos permitir\'{a} manipular y visualizar el entorno 3D obtenido a partir de los programas mencionados previamente y que nos ser\'{a} de utilidad para la aplicaci\'{o}n final. La segunda fase del proyecto se plantea el reconocimiento de objetos a partir de im\'{a}genes. El reconocimiento se realizar\'{a} basado en caracter\'{\i}sticas salientes en la imagen. En primer lugar se crear\'{a} una peque\~{n}a base de datos con im\'{a}genes de un conjunto de objetos y su reconstrucci\'{o}n densa. En segundo lugar, se buscar\'{a} en la escena los objetos de la base de datos mediante la comparaci\'{o}n de descriptores asociados a las caracter\'{\i}sticas salientes. Para ello ser\'{a} necesario el desarrollo de una aplicaci\'{o}n que nos permita comparar las im\'{a}genes de los diferentes objetos de nuestra base de datos con las im\'{a}genes de la escena y ver as\'{\i} si los objetos de la base de datos aparecen o no en la escena. Una vez el objeto ha sido reconocido en la escena se pretende sustituir en el modelo 3D de dicha escena la reconstrucci\'{o}n 3D del objeto (por ejemplo, un libro) disponible en nuestra base de datos, permiti\'{e}ndonos as\'{\i} visualizar en la escena 3D partes del libro que no se ve\'{\i}an en las im\'{a}genes de la escena. Para ello ser\'{a} necesaria una tercera y \'{u}ltima fase en el proyecto donde se deber\'{a} posicionar los modelos 3D de los objetos que disponemos en la base de datos y que aparecen en la escena.},
author = {{Valdivielso Miranda}, F\'{e}lix},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Reconocimiento y registro 3D de objetos conocidos en una escena - Valdivielso Miranda - 2010.pdf:pdf},
pages = {87},
school = {Centro Polit\'{e}cnico Superior},
title = {{Reconocimiento y registro 3D de objetos conocidos en una escena}},
year = {2010}
}
@comment{Bane2004: same ISMAR '04 proceedings as Newman2004 in this file (isbn and booktitle match); entry fields are complete and consistent.}
@inproceedings{Bane2004,
abstract = {This paper presents a set of interactive tools designed to give users virtual x-ray vision. These tools address a common problem in depicting occluded infrastructure: either too much information is displayed, confusing users, or too little information is displayed, depriving users of important depth cues. Four tools are presented: the tunnel tool and room selector tool directly augment the user's view of the environment, allowing them to explore the scene in direct, first person view. The room in miniature tool allows the user to select and interact with a room from a third person perspective, allowing users to view the contents of the room from points of view that would normally be difficult or impossible to achieve. The room slicer tool aids users in exploring volumetric data displayed within the room in miniature tool. Used together, the tools presented in this paper can be used to achieve the virtual x-ray vision effect. We test our prototype system in a far-field mobile augmented reality setup, visualizing the interiors of a small set of buildings on the UCSB campus.},
author = {Bane, Ryan and H\"{o}llerer, Tobias H.},
booktitle = {ISMAR '04: Proceedings of the 3rd IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2004.36},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Interactive Tools for Virtual X-Ray Vision in Mobile Augmented Reality - Bane, Hollerer - 2004.pdf:pdf},
isbn = {0-7695-2191-6},
pages = {231--239},
publisher = {IEEE Computer Society},
title = {{Interactive Tools for Virtual X-Ray Vision in Mobile Augmented Reality}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1383060},
year = {2004}
}
@inproceedings{Wither2006,
abstract = {We present a mobile augmented reality system for outdoor annotation of the real world. To reduce user burden, we use aerial photographs in addition to the wearable system's usual data sources (position, orientation, camera and user input). This allows the user to accurately annotate 3D features with only a few simple interactions from a single position by aligning features in both their first-person viewpoint and in the aerial view. We examine three types of aerial photograph features - corners, edges, and regions - that are suitable for a wide variety of useful mobile augmented reality applications, and are easily visible on aerial photographs. By using aerial photographs in combination with wearable augmented reality, we are able to achieve much higher accuracy 3D annotation positions than was previously possible from a single user location.},
address = {Santa Barbara, CA},
annote = {NOTE(review): author field previously listed Feiner, MacIntyre, H\"{o}llerer and Webster, which contradicts the key, title and attached file ("Wither, Diverdi, Hollerer - 2006"); corrected to the ISMAR 2006 paper's authors.},
author = {Wither, Jason and DiVerdi, Stephen and H\"{o}llerer, Tobias H.},
booktitle = {ISMAR '06: Proceedings of the 5th IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2006.297808},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Using aerial photographs for improved mobile AR annotation - Wither, Diverdi, Hollerer - 2006.pdf:pdf},
isbn = {1-4244-0650-1},
keywords = {annotation,anywhere augmentation,modeling,outdoor augmented reality,wearable system},
month = oct,
pages = {159--162},
publisher = {IEEE Computer Society},
title = {{Using aerial photographs for improved mobile AR annotation}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4079270},
year = {2006}
}
@inproceedings{Georgel2007,
abstract = {Construction companies employ CAD software during the planning phase, but what is finally built often does not match the original plan. The procedure of validating the model is called "discrepancy check". The system proposed here allows the user to easily obtain an augmentation in order to find differences between the planned 3D model and the built items. The main difference to previous body of work in this field is the emphasis on usability and acceptance of the solution. While standard image-based solutions use markers or rely on a "perfect" 3D model to find the pose of the camera, our software uses anchor-plates. Anchor-Plates are rectangular structures installed on walls and ceiling in the majority of industrial edifices. We are using them as landmarks because they are the most reliable components often used as reference coordinates by constructors. Furthermore, for real industrial applications, they are the most suitable solutions in terms of general applicability. Unfortunately, they have not been designed with computer vision applications in mind. On the contrary, they are often made or painted in such way that they are not easily popping out. They are therefore difficult targets to segment and to track. This paper proposes a solution to extract and match them to their 3D counterparts. We created a software that uses the detected structures for pose estimation and image augmentation. The software has been successfully employed to find discrepancies in several rooms of two industrial plants.},
author = {Georgel, Pierre and Schroeder, Pierre and Benhimane, Selim and Hinterstoisser, Stefan and Appel, Mirko and Navab, Nassir},
booktitle = {ISMAR '07: Proceedings of the 6th IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2007.4538834},
file = {:D$\backslash$:/\_Docs/mendeleyMain/An Industrial Augmented Reality Solution For Discrepancy Check - Georgel et al. - 2007.pdf:pdf},
isbn = {978-1-4244-1749-0},
month = nov,
pages = {111--115},
publisher = {IEEE Computer Society},
title = {{An Industrial Augmented Reality Solution For Discrepancy Check}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4538834},
year = {2007}
}
@inproceedings{Tang2003,
abstract = {Although there has been much speculation about the potential of Augmented Reality (AR), there are very few empirical studies about its effectiveness. This paper describes an experiment that tested the relative effectiveness of AR instructions in an assembly task. Task information was displayed in user's field of view and registered with the workspace as 3D objects to explicitly demonstrate the exact execution of a procedure step. Three instructional media were compared with the AR system: a printed manual, computer assisted instruction (CAI) using a monitor-based display, and CAI utilizing a head-mounted display. Results indicate that overlaying 3D instructions on the actual work pieces reduced the error rate for an assembly task by 82\%, particularly diminishing cumulative errors - errors due to previous assembly mistakes. Measurement of mental effort indicated decreased mental effort in the AR condition, suggesting some of the mental calculation of the assembly task is offloaded to the system.},
address = {New York, New York, USA},
annote = {NOTE(review): was typed @article with the proceedings title (and the wrong ordinal "22th") in the journal field; retyped as @inproceedings with a booktitle. volume/number retained from the original record (likely CHI Letters 5(1)) - verify.},
author = {Tang, Arthur and Owen, Charles and Biocca, Frank and Mou, Weimin},
booktitle = {CHI '03: Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
doi = {10.1145/642625.642626},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Comparative effectiveness of augmented reality in object assembly - Tang et al. - 2003.pdf:pdf},
isbn = {1581136307},
keywords = {Augmented reality,computer assisted instruction,human computer interaction,usability study},
number = {1},
pages = {73--80},
publisher = {ACM Press},
title = {{Comparative Effectiveness of Augmented Reality in Object Assembly}},
url = {http://portal.acm.org/citation.cfm?doid=642611.642626},
volume = {5},
year = {2003}
}
@inproceedings{Guven2006,
abstract = {We present a set of techniques that enable mobile users to visualize and navigate complex hypermedia structures embedded in the real world, through augmented reality or virtual reality. Situating hypermedia in the 3D physical environment makes it possible to represent information about users' surroundings in context. However, it requires addressing a new set of problems beyond those of visualizing hypermedia on a 2D display: Nodes and links can potentially be distributed across large distances, and may be occluded by other objects, both real and virtual. Our techniques address these issues by enabling mobile users to select and manipulate portions of the hypermedia structure by tilting, lifting and shifting them, to view more clearly links and nodes that would otherwise be occluded or ambiguously connected.},
address = {Santa Barbara, CA},
author = {G\"{u}ven, Sinem and Feiner, Steven K.},
booktitle = {ISMAR '06: Proceedings of the 5th IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2006.297807},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Visualizing and navigating complex situated hypermedia in augmented and virtual reality - G\"{u}ven, Feiner - 2006.pdf:pdf},
isbn = {1-4244-0650-1},
keywords = {augmented reality,interaction,navigation,situated hypermedia,virtual reality},
month = oct,
pages = {155--158},
publisher = {IEEE Computer Society},
title = {{Visualizing and navigating complex situated hypermedia in augmented and virtual reality}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4079269},
year = {2006}
}
@inproceedings{Piekarski2003,
author = {Piekarski, Wayne and Thomas, Bruce H.},
title = {{Interactive augmented reality techniques for construction at a distance of 3D geometry}},
booktitle = {EGVE '03: Proceedings of the Workshop on Virtual Environments},
pages = {19--28},
year = {2003},
publisher = {ACM Press},
address = {New York, New York, USA},
doi = {10.1145/769953.769956},
isbn = {3905673002},
url = {http://portal.acm.org/citation.cfm?doid=769953.769956},
keywords = {3D user interfaces,Augmented reality,construction at a distance,wearable computers},
abstract = {This paper presents a series of new augmented reality user interaction techniques to support the capture and creation of 3D geometry of large outdoor structures, part of an overall concept we have named construction at a distance. We use information about the user's physical presence, along with hand and head gestures, to allow the user to capture and create the geometry of objects that are orders of magnitude larger than themselves, with no prior information or assistance. Using augmented reality and these new techniques, users can enter geometry and verify its accuracy in real time. This paper includes a number of examples showing objects that have been modelled in the physical world, demonstrating the usefulness of the techniques.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Interactive augmented reality techniques for construction at a distance of 3D geometry - Piekarski, Thomas - 2003.pdf:pdf}
}
@inproceedings{Schmeil2007,
author = {Schmeil, Andreas and Broll, Wolfgang},
title = {{An Anthropomorphic AR-Based Personal Information Manager and Guide}},
booktitle = {UAHCI '07: Proceedings of the 4th International Conference on Universal Access in Human-Computer Interaction: Ambient Interaction},
series = {Lecture Notes in Computer Science},
pages = {699--708},
year = {2007},
publisher = {Springer-Verlag},
doi = {10.1007/978-3-540-73281-5\_76},
keywords = {anthropomorphic user interfaces,augmented reality,digital assistants,environment model,location based systems,mobile computing,virtual humans},
abstract = {The use of personal electronic equipment has significantly increased during recent years. Augmented Reality (AR) technology enables mobile devices to provide a very rich user experience by combining mobile computing with connectivity and location-awareness. In this paper we discuss the approach and development of an Augmented Reality-based personal assistant, combining the familiar interface of a human person with the functionality of a location-aware digital information system. The paper discusses the main components of the system, including the anthropomorphic user interface as well as the results of an initial prototype evaluation.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/An Anthropomorphic AR-Based Personal Information Manager and Guide - Schmeil, Broll - 2007.pdf:pdf}
}
@inproceedings{Reitmayr2004,
author = {Reitmayr, Gerhard and Schmalstieg, Dieter},
title = {{Collaborative Augmented Reality for Outdoor Navigation and Information Browsing}},
booktitle = {Proceedings of the Symposium on Location Based Services and TeleCartography},
pages = {31--41},
year = {2004},
publisher = {Wiley},
abstract = {Augmented reality (AR) can provide an excellent user interface for visualization in a mobile computing application. The user's view is augmented with location based information at the correct spatial location, thus providing an intuitive way of presenting such information. In this work we demonstrate the use of AR for collaborative navigation and information browsing tasks in an urban environment. A navigation function allows one or more users to roam through a city and guides them to selected destinations. Information browsing presents users with information about objects in their surrounding. Both functions feature support for collaboration. The developed system does not only concentrate on the user interface aspects but also provides a scalable infrastructure to support mobile applications. To this end we developed a 3-tier architecture to manage a common data model for a set of applications. It is inspired by current Internet application frameworks and consists of a central storage layer using a common data model, a transformation layer responsible for filtering and adapting the data to the requirements of a particular applications on request, and finally of the applications itself.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Collaborative Augmented Reality for Outdoor Navigation and Information Browsing - Reitmayr, Schmalstieg - 2004.pdf:pdf}
}
@article{Weiser1993,
author = {Weiser, Mark},
title = {{Some computer science issues in ubiquitous computing}},
journal = {Communications of the ACM},
volume = {36},
number = {7},
pages = {75--84},
month = jul,
year = {1993},
doi = {10.1145/159544.159617},
issn = {00010782},
url = {http://portal.acm.org/citation.cfm?doid=159544.159617},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Some computer science issues in ubiquitous computing - Weiser - 1993.pdf:pdf}
}
@inproceedings{Fiala2005,
abstract = {Fiducial marker systems consist of patterns that are mounted in the environment and automatically detected in digital camera images using an accompanying detection algorithm. They are useful for augmented reality (AR), robot navigation, and general applications where the relative pose between a camera and object is required. Important parameters for such marker systems is their false detection rate (false positive rate), their inter-marker confusion rate, minimal detection size (in pixels) and immunity to lighting variation. ARTag is a marker system that uses digital coding theory to get a very low false positive and inter-marker confusion rate with a small required marker size, employing an edge linking method to give robust lighting variation immunity. ARTag markers are bi-tonal planar patterns containing a unique ID number encoded with robust digital techniques of checksums and forward error correction (FEC). This proposed new system, ARTag has very low and numerically quantifiable error rates, does not require a grey scale threshold as does other marker systems, and can encode up to 2002 different unique ID's with no need to store patterns. Experimental results are shown validating this system.},
address = {San Diego, California},
author = {Fiala, Mark},
booktitle = {CVPR '05: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
doi = {10.1109/CVPR.2005.74},
isbn = {0-7695-2372-2},
pages = {590--596},
publisher = {IEEE Computer Society},
title = {{ARTag, a Fiducial Marker System Using Digital Techniques}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1467495},
volume = {2},
year = {2005}
}
@book{Haller2006,
abstract = {Although the field of mixed reality has grown significantly over the last decade, there have been few published books about augmented reality, particularly the interface design aspects. Emerging Technologies of Augmented Reality: Interfaces and Design provides a foundation of the main concepts of augmented reality (AR), with a particular emphasis on user interfaces, design, and practical AR techniques, from tracking algorithms to design principles for AR interfaces. Emerging Technologies of Augmented Reality: Interfaces and Design contains comprehensive information focusing on the following topics: technologies that support AR, development environments, interface design and evaluation of applications, and case studies of AR applications.},
address = {Hershey, PA, USA},
doi = {10.4018/978-1-59904-066-0},
editor = {Haller, Michael and Billinghurst, Mark and Thomas, Bruce H.},
isbn = {9781599040660},
month = nov,
pages = {399},
publisher = {IGI Global},
title = {{Emerging Technologies of Augmented Reality}},
url = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-59904-066-0},
year = {2006}
}
@article{Choi2010,
abstract = {Recent improvements in the capabilities of smart phones are making the location-based augmented reality services a reality. When widely used, such a system is expected to produce many user-created geospatial tags concentrated at popular and important sites. In this paper, we describe a way to organize and group such geospatial tags (manually or automatically using a nearest neighbor algorithm) and how to efficiently interact to search and find the tag that the user might be interested in. The proposed method was implemented on an Apple iPhone, and an experiment was carried out to verify the improved usability. The results do indicate the advantage of the principle of hierarchical organization of data. We also further found that the ``automatic-but-less-accurate'' approach is more suitable than ``precise-but-manual'' due to the dynamic nature of the mobile interaction and less than perfect sensing.},
author = {Choi, Jinhyuk and Jang, Bongkyu and Kim, Gerard J.},
doi = {10.1007/s00779-010-0343-3},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Organizing and presenting geospatial tags in location-based augmented reality - Choi, Jang, Kim - 2010.pdf:pdf},
issn = {1617-4909},
journal = {Personal and Ubiquitous Computing},
keywords = {augmented reality,geospatial tags,mobile interface},
month = nov,
number = {6},
pages = {641--647},
title = {{Organizing and presenting geospatial tags in location-based augmented reality}},
url = {http://www.springerlink.com/index/10.1007/s00779-010-0343-3},
volume = {15},
year = {2010}
}
@inproceedings{Saaski2008,
author = {S\"{a}\"{a}ski, Juha and Salonen, Tapio and Hakkarainen, Mika and Siltanen, Sanni and Woodward, Charles and Lempi\"{a}inen, Juhani},
title = {{Integration of Design and Assembly Using Augmented Reality}},
booktitle = {IPAS '08: Proceedings of the IFIP TC5 WG5.5 Fourth International Precision Assembly Seminar},
editor = {Ratchev, Svetan M. and Koelemeijer, Sandra},
series = {IFIP International Federation for Information Processing},
volume = {260},
pages = {395--404},
year = {2008},
publisher = {Springer Boston},
address = {Chamonix, France},
doi = {10.1007/978-0-387-77405-3\_39},
keywords = {assembly instruction,assembly work,augmented reality,cad,pdm},
abstract = {This paper presents a methodology and a system for augmented reality aided assembly work. We concentrate in particular on the requirements on information processing and data flow for implementing augmented assembly systems in real life production environments. A pilot case with an augmented assembly task at the Finnish tractor company Valtra is described.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Integration of Design and Assembly Using Augmented Reality - S\"{a}\"{a}ski et al. - 2008.pdf:pdf}
}
@article{Ajanki2011,
abstract = {In this paper, we report on a prototype augmented reality (AR) platform for accessing abstract information in real-world pervasive computing environments. Using this platform, objects, people, and the environment serve as contextual channels to more information. The user's interest with respect to the environment is inferred from eye movement patterns, speech, and other implicit feedback signals, and these data are used for information filtering. The results of proactive context-sensitive information retrieval are augmented onto the view of a handheld or head-mounted display or uttered as synthetic speech. The augmented information becomes part of the user's context, and if the user shows interest in the AR content, the system detects this and provides progressively more information. In this paper, we describe the first use of the platform to develop a pilot application, Virtual Laboratory Guide, and early evaluation results of this application.},
author = {Ajanki, Antti and Billinghurst, Mark and Gamper, Hannes and J\"{a}rvenp\"{a}\"{a}, Toni and Kandemir, Melih and Kaski, Samuel and Koskela, Markus and Kurimo, Mikko and Laaksonen, Jorma and Puolam\"{a}ki, Kai and Ruokolainen, Teemu and Tossavainen, Timo},
doi = {10.1007/s10055-010-0183-5},
file = {:D$\backslash$:/\_Docs/mendeleyMain/An augmented reality interface to contextual information - Ajanki et al. - 2011.pdf:pdf},
issn = {1359-4338},
journal = {Virtual Reality},
keywords = {augmented reality,gaze tracking,information retrieval,machine learning,pattern recognition},
month = dec,
number = {2-3},
pages = {161--173},
title = {{An augmented reality interface to contextual information}},
url = {http://www.springerlink.com/index/10.1007/s10055-010-0183-5},
volume = {15},
year = {2011}
}
@inproceedings{Kahari,
abstract = {This paper outlines some of the features and functionalities of MARA, Sensor Based Mobile Augmented Reality system, demo. The MARA system provided by a GPS receiver, implements hand-held, video-see through augmented reality for Nokia S60 mobile imaging devices equipped with additional sensors, shown in Figure 1. The system utilizes sensors as follows: position is accelerometers provide relative orientation and a tilt compensated magnetometer is used to determine heading. The device's on-board camera is used for image acquisition and the on-board screen for rendering, including annotations. All the annotation data and additional map images are downloaded from external services on the Internet via cellular network connection. The system is based upon a light-weight and portable standard platform. It requires no additional devices beyond the sensors. The platform also has excellent capabilities for network connectivity and great potential for multimodality.},
address = {Santa Barbara, CA},
author = {K\"{a}h\"{a}ri, Markus and Murphy, David J.},
booktitle = {ISMAR '06: Proceedings of the 5th IEEE and ACM International Symposium on Mixed and Augmented Reality},
file = {:D$\backslash$:/\_Docs/mendeleyMain/MARA – Sensor Based Augmented Reality System for Mobile Imaging Device - K\"{a}h\"{a}ri, Murphy - 2006.pdf:pdf},
keywords = {Sensor based,mobile augmented reality,mobile imaging device},
publisher = {IEEE Computer Society},
title = {{MARA -- Sensor Based Augmented Reality System for Mobile Imaging Device}},
year = {2006}
}
@inproceedings{Duh2008,
author = {Duh, Henry Been-Lirn and Billinghurst, Mark},
title = {{Trends in augmented reality tracking, interaction and display: A review of ten years of ISMAR}},
booktitle = {ISMAR '08: Proceedings of the 7th IEEE and ACM International Symposium on Mixed and Augmented Reality},
pages = {193--202},
month = sep,
year = {2008},
publisher = {IEEE Computer Society},
address = {Cambridge},
doi = {10.1109/ISMAR.2008.4637362},
isbn = {978-1-4244-2840-3},
url = {http://ir.canterbury.ac.nz/bitstream/10092/2345/1/12613246\_2008-Trend-inAugmentedRealityTrackingInteractionandDisplayAReviewofTenYearsofISMAR.pdf},
keywords = {AR application,AR display,Augmented reality,calibration and registration,interaction,tracking},
mendeley-tags = {AR application,AR display,Augmented reality,calibration and registration,interaction,tracking},
abstract = {Although Augmented Reality technology was first developed over forty years ago, there has been little survey work giving an overview of recent research in the field. This paper reviews the ten-year development of the work presented at the ISMAR conference and its predecessors with a particular focus on tracking, interaction and display research. It provides a roadmap for future augmented reality research which will be of great value to this relatively young field, and also for helping researchers decide which topics should be explored when they are beginning their own studies in the area.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Trends in augmented reality tracking, interaction and display A review of ten years of ISMAR - Duh, Billinghurst - 2008.pdf:pdf}
}
@inproceedings{Wagner2008a,
author = {Wagner, Daniel and Langlotz, Tobias and Schmalstieg, Dieter},
title = {{Robust and Unobtrusive Marker Tracking on Mobile Phones}},
booktitle = {ISMAR '08: Proceedings of the 7th IEEE and ACM International Symposium on Mixed and Augmented Reality},
pages = {121--124},
month = sep,
year = {2008},
publisher = {IEEE Computer Society},
address = {Cambridge},
doi = {10.1109/ISMAR.2008.4637337},
isbn = {978-1-4244-2840-3},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4637337},
abstract = {Marker tracking has revolutionized augmented reality about a decade ago. However, this revolution came at the expense of visual clutter. In this paper, we propose several new marker techniques, which are less obtrusive than the usual black and white squares. Furthermore, we report methods that allow tracking beyond the visibility of these markers further improving robustness. All presented techniques are implemented in a single tracking library, are highly efficient in their memory and CPU usage and run at interactive frame rates on mobile phones.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Robust and Unobtrusive Marker Tracking on Mobile Phones - Wagner, Langlotz, Schmalstieg - 2008.pdf:pdf}
}
@inproceedings{Mohring2004,
author = {M\"{o}hring, Mathias and Lessig, Christian and Bimber, Oliver},
title = {{Video See-Through AR on Consumer Cell-Phones}},
booktitle = {ISMAR '04: Proceedings of the 3rd IEEE and ACM International Symposium on Mixed and Augmented Reality},
pages = {252--253},
year = {2004},
publisher = {IEEE Computer Society},
doi = {10.1109/ISMAR.2004.63},
isbn = {0-7695-2191-6},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1383062},
abstract = {We present a first running video see-through augmented reality system on a consumer cell-phone. It supports the detection and differentiation of different markers, and correct integration of rendered 3D graphics into the live video stream via a weak perspective projection camera model and an OpenGL rendering pipeline.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Video See-Through AR on Consumer Cell-Phones - M\"{o}hring, Lessig, Bimber - 2004.pdf:pdf}
}
@inproceedings{Rekimoto1998,
author = {Rekimoto, Jun},
title = {{Matrix: a realtime object identification and registration method for augmented reality}},
booktitle = {APCHI '98: Proceedings of the 3rd Asia Pacific Computer-Human Interaction},
pages = {63--68},
year = {1998},
publisher = {IEEE Computer Society},
address = {Shonan Village Center, Japan},
doi = {10.1109/APCHI.1998.704151},
isbn = {0-8186-8347-3},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=704151},
abstract = {The paper introduces a novel technique for producing augmented reality systems that simultaneously identify real world objects and estimate their coordinate systems. This method utilizes a 2D matrix marker, a square shaped barcode, which can identify a large number of objects. It also acts as a landmark to register information on real world images. As a result, it costs virtually nothing to produce and attach codes to various kinds of real world objects, because the matrix code is printable. We have developed an augmented reality system based on this method, and demonstrated several potential applications},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Matrix a realtime object identification and registration method for augmented reality - Rekimoto - 1998.pdf:pdf}
}
@inproceedings{Wagner2007,
abstract = {In this paper we present ARToolKitPlus, a successor to the popular ARToolKit pose tracking library. ARToolKitPlus has been optimized and extended for the usage on mobile devices such as smartphones, PDAs and Ultra Mobile PCs (UMPCs). We explain the need and specific requirements of pose tracking on mobile devices and how we met those requirements. To prove the applicability we performed an extensive benchmark series on a broad range of off-the-shelf handhelds.},
author = {Wagner, Daniel and Schmalstieg, Dieter},
booktitle = {CVWW '07: Proceedings of the 12th Computer Vision Winter Workshop},
file = {:D$\backslash$:/\_Docs/mendeleyMain/ARToolKitPlus for Pose Tracking on Mobile Devices ARToolKit - Wagner, Schmalstieg - 2007.pdf:pdf},
pages = {139--146},
title = {{ARToolKitPlus for Pose Tracking on Mobile Devices}},
url = {http://www.icg.tugraz.at/pub/Members/daniel/ARToolKitPlusMobilePoseTracking/download/},
year = {2007}
}
@phdthesis{Klein2006,
abstract = {To address the demanding tracking needs of AR, two specific AR formats are considered. Firstly, for a head-mounted display, a markerless tracker which is robust to rapid head motions is presented. This robustness is achieved by combining visual measurements with those of head-worn inertial sensors. A novel sensor fusion approach allows not only pose prediction, but also enables the tracking of video with unprecedented levels of motion blur. Secondly, the tablet PC is proposed as a user-friendly AR medium. For this device, tracking combines inside-out edge tracking with outside-in tracking of tablet-mounted LEDs. Through the external fusion of these complementary sensors, accurate and robust tracking is achieved within a modest computing budget. This allows further visual analysis of the occlusion boundaries between real and virtual objects and a marked improvement in the quality of augmentations. Finally, this thesis shows that not only can tracking be made resilient to motion blur, it can benefit from it. By exploiting the directional nature of motion blur, camera rotations can be extracted from individual blurred frames. The extreme efficiency of the proposed method makes it a viable drop-in replacement for inertial sensors.},
author = {Klein, Georg},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Visual Tracking for Augmented Reality - Klein - 2006.pdf:pdf},
month = jan,
pages = {193},
school = {University of Cambridge},
title = {{Visual Tracking for Augmented Reality}},
year = {2006}
}
@inproceedings{Wagner2009,
author = {Wagner, Daniel and Schmalstieg, Dieter},
title = {{History and Future of Tracking for Mobile Phone Augmented Reality}},
booktitle = {ISUVR '09: Proceedings of the International Symposium on Ubiquitous Virtual Reality},
pages = {7--10},
month = jul,
year = {2009},
publisher = {IEEE Computer Society},
address = {Gwangju, South Korea},
doi = {10.1109/ISUVR.2009.11},
isbn = {978-1-4244-4437-3},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5232244},
abstract = {We present an overview on the history of tracking for mobile phone augmented reality. We present popular approaches using marker tracking, natural feature tracking or offloading to nearby servers. We then outline likely future work.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/History and Future of Tracking for Mobile Phone Augmented Reality - Wagner, Schmalstieg - 2009.pdf:pdf}
}
@article{Azuma1997,
author = {Azuma, Ronald T.},
title = {{A Survey of Augmented Reality}},
journal = {Presence: Teleoperators and Virtual Environments},
volume = {6},
number = {4},
pages = {355--385},
year = {1997},
url = {http://www.cs.unc.edu/~azuma/ARpresence.pdf},
abstract = {This paper surveys the field of Augmented Reality, in which 3-D virtual objects are integrated into a 3-D real environment in real time. It describes the medical, manufacturing, visualization, path planning, entertainment and military applications that have been explored. This paper describes the characteristics of Augmented Reality systems, including a detailed discussion of the tradeoffs between optical and video blending approaches. Registration and sensing errors are two of the biggest problems in building effective Augmented Reality systems, so this paper summarizes current efforts to overcome these problems. Future directions and areas requiring further research are discussed. This survey provides a starting point for anyone interested in researching or using Augmented Reality.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/A Survey of Augmented Reality - Azuma - 1997.pdf:pdf}
}
@inproceedings{Rosenthal2010,
abstract = {We present a study that evaluates the effectiveness of augmenting on-screen instructions with micro-projection for manual task guidance unlike prior work, which replaced screen instructions with alternative modalities (e.g., head-mounted displays). In our study, 30 participants completed 10 trials each of 11 manual tasks chosen to represent a set of common task-components (e.g., cutting, folding) found in many everyday activities such as crafts, cooking, and hobby electronics. Fifteen participants received only on-screen instructions, and 15 received both on-screen and micro-projected instructions. In contrast to prior work, which focused only on whole tasks, our study examines the benefit of augmenting common task instructions. The augmented instructions improved participants' performance overall; however, we show that in certain cases when projected guides and physical objects visually interfered, projected elements caused increased errors. Our results demonstrate that examining effectiveness at an instruction level is both useful and necessary, and provide insight into the design of systems that help users perform everyday tasks.},
address = {Copenhagen, Denmark},
author = {Rosenthal, Stephanie and Kane, Shaun K. and Wobbrock, Jacob O. and Avrahami, Daniel},
booktitle = {UbiComp '10: Proceedings of the 12th ACM International Conference on Ubiquitous Computing},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Augmenting On-Screen Instructions with Micro-Projected Guides When it Works, and When it Fails - Rosenthal et al. - 2010.pdf:pdf},
keywords = {Task guidance,augmented reality,computer vision,computer-assisted instruction,everyday tasks,micro-projection},
pages = {203--212},
publisher = {ACM Press},
title = {{Augmenting On-Screen Instructions with Micro-Projected Guides: When it Works, and When it Fails}},
year = {2010}
}
@techreport{Wimo2010b,
author = {Wimo, Anders and Prince, Martin},
title = {{World Alzheimer Report 2010: The Global Economic Impact of Dementia - Executive Summary}},
institution = {Alzheimer's Disease International},
pages = {12},
year = {2010},
url = {http://www.alz.co.uk/research/files/WorldAlzheimerReport2010ExecutiveSummary.pdf},
file = {:D$\backslash$:/\_Docs/mendeleyMain/World Alzheimer Report 2010 The Global Economic Impact of Dementia - Executive Summary - Wimo, Prince - 2010.pdf:pdf}
}
@inproceedings{Hodges2006,
abstract = {This paper presents a novel ubiquitous computing device, the SenseCam, a sensor augmented wearable stills camera. SenseCam is designed to capture a digital record of the wearer's day, by recording a series of images and capturing a log of sensor data. We believe that reviewing this information will help the wearer recollect aspects of earlier experiences that have subsequently been forgotten, and thereby form a powerful retrospective memory aid. In this paper we review existing work on memory aids and conclude that there is scope for an improved device. We then report on the design of SenseCam in some detail for the first time. We explain the details of a first in-depth user study of this device, a 12-month clinical trial with a patient suffering from amnesia. The results of this initial evaluation are extremely promising; periodic review of images of events recorded by SenseCam results in significant recall of those events by the patient, which was previously impossible. We end the paper with a discussion of future work, including the application of SenseCam to a wider audience, such as those with neurodegenerative conditions such as Alzheimer's disease.},
address = {Orange County, CA},
author = {Hodges, Steve and Williams, Lyndsay and Berry, Emma and Izadi, Shahram and Srinivasan, James and Butler, Alex and Smyth, Gavin and Kapur, Narinder and Wood, Ken},
booktitle = {UbiComp '06: Proceedings of the 8th International Conference on Ubiquitous Computing},
doi = {10.1007/11853565\_11},
editor = {Dourish, Paul and Friday, Adrian},
file = {:D$\backslash$:/\_Docs/mendeleyMain/SenseCam A Retrospective Memory Aid - Hodges et al. - 2006.pdf:pdf},
pages = {177--193},
publisher = {Springer Berlin / Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{SenseCam: A Retrospective Memory Aid}},
url = {http://www.springerlink.com/content/kj96324768567r68/fulltext.pdf},
volume = {4206},
year = {2006}
}
@article{Riche2009,
abstract = {Caring for the elderly is becoming a key challenge for society, given the shortage of trained personnel and the increased age of the population. Innovative approaches are needed to help the elderly remain at home longer and more safely, that is, to age in place. One popular strategy is to monitor the activity of the elderly: this focuses on obtaining information for caregivers rather than supporting the elderly directly. We propose an alternative, i.e. to enhance their inter-personal communication. We report the results of a user study with 14 independent elderly women and discuss the existing role that communication plays in maintaining their independence and well- being. We highlight the importance of peer support relationships, which we call PeerCare, and how awareness of each other's rhythms and routines helps them to stay in touch. We then describe the deployment of a technology probe, called markerClock, which a pair of elderly friends used to improve their awareness of each other's rhythms and routines. We conclude with a discussion of how such communication appliances enhance the awareness of rhythms and routines among elderly peers and can improve their quality of life and provide safer and more satisfying aging in place.},
author = {Riche, Yann and Mackay, Wendy},
doi = {10.1007/s10606-009-9105-z},
file = {:D$\backslash$:/\_Docs/mendeleyMain/PeerCare Supporting Awareness of Rhythms and Routines for Better Aging in Place - Riche, Mackay - 2009.pdf:pdf},
issn = {0925-9724},
journal = {Computer Supported Cooperative Work (CSCW)},
keywords = {aging in place,appliances,awareness,communication,computer-mediated communication,elderly,markerclock,peercare,rhythms,routines,technology probes},
month = nov,
number = {1},
pages = {73--104},
title = {{PeerCare: Supporting Awareness of Rhythms and Routines for Better Aging in Place}},
url = {http://www.springerlink.com/index/10.1007/s10606-009-9105-z},
volume = {19},
year = {2009}
}
@phdthesis{Olsson2009,
abstract = {This thesis describes the theory and implementation of both local and distributed systems for object recognition on the mobile Android platform. It further describes the possibilities and limitations of computer vision applications on modern mobile devices. Depending on the application, some or all of the computations may be outsourced to a server to improve performance. The object recognition methods used are based on local features. These features are extracted and matched against a known set of features in the mobile device or on the server depending on the implementation. In the thesis we describe local features using the popular SIFT and SURF algorithms. The matching is done using both simple exhaustive search and more advanced algorithms such as kd-tree best-bin-first search. To improve the quality of the matches in regards to false positives we have used different RANSAC type iterative methods. We describe two implementations of applications for single- and multi-object recognition, and a third, heavily optimized, SURF implementation to achieve near real-time tracking on the client. The implementations are focused on the Java language and special considerations have been taken to accommodate this. This choice of platform reflects the general direction of the mobile industry, where an increasing amount of application development is done in high-level languages such as Java. We also investigate the use of native programming languages such as C/C++ on the same platform. Finally, we present some possible extensions of our implementations as future work. These extensions specifically take advantage of the hardware and abilities of modern mobile devices, including orientation sensors and cameras.},
author = {Olsson, Sebastian and {\AA}kesson, Philip},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Distributed Mobile Computer Vision and Applications on the Android Platform - Olsson, \AA kesson - 2009.pdf:pdf},
pages = {72},
school = {Lund University},
title = {{Distributed Mobile Computer Vision and Applications on the Android Platform}},
year = {2009}
}
@mastersthesis{Conti2010,
abstract = {We propose the design and development of a framework for the creation of small visual features database. This database is to be used on mobile devices to perform building recognition on a self-contained ``tell me what I am looking at'' application using two inputs: GPS data and camera images. The main contribution of our approach is exploring the automated creation of a compact local visual features database to be installed on the mobile device. Using a local database is justified by scenarios where a data connection to a remote server is not available or too expensive (e.g. tourists using data roaming abroad). Creating a compact database requires a balance between various constraints. The number of visual features in the database will affect both the size of the database on the limited storage of a mobile platform and the computation time of the image matching. However, having a small number of features in the database also results in poor results. This project evaluates the use of a genetic algorithm that will select the best parameters to build the database using visual features clustering.},
author = {Conti, Marco},
file = {:D$\backslash$:/\_Docs/mendeleyMain/A Framework for Visual Features Database Creation for Building Recognition on Mobile Devices - Conti - 2010.pdf:pdf},
month = sep,
pages = {81},
school = {University of Dublin, Trinity College},
title = {{A Framework for Visual Features Database Creation for Building Recognition on Mobile Devices}},
url = {http://www.scss.tcd.ie/postgraduate/msciet/current/Dissertations/0910/Conti.pdf},
year = {2010}
}
@inproceedings{Romero2009,
abstract = {El problema del SLAM (Simultaneous Localization And Mapping) es un problema clave en el campo de la rob\'{o}tica. Las soluciones a dicho problema tienen problemas computacionales cuando el n\'{u}mero de caracter\'{\i}sticas aumenta. En este trabajo, pretendemos realizar una comparativa de dos detectores de caracter\'{\i}sticas visuales, SIFT y SURF, as\'{\i} como proponer un nuevo m\'{e}todo de emparejamiento que nos permita determinar si la imagen actual es parecida a una vista con anterioridad.},
author = {Romero, A. M. and Cazorla, Miguel A.},
booktitle = {WAF '09: X Workshop de Agentes F\'{\i}sicos},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Comparativa de detectores de caracter\'{\i}sticas visuales y su aplicaci\'{o}n al SLAM - Romero, Cazorla - 2009.pdf:pdf},
pages = {55--62},
title = {{Comparativa de detectores de caracter\'{\i}sticas visuales y su aplicaci\'{o}n al SLAM}},
url = {http://www.jopha.net/waf/index.php/waf/waf09/paper/viewFile/41/35},
year = {2009}
}
@inproceedings{Beis1997,
abstract = {Shape indexing is a way of making rapid associations between features detected in an image and object models that could have produced them. When model databases are large, the use of high-dimensional features is critical, due to the improved level of discrimination they can provide. Unfortunately, finding the nearest neighbour to a query point rapidly becomes inefficient as the dimensionality of the feature space increases. Past indexing methods have used hash tables for hypothesis recovery, but only in low-dimensional situations. In this paper, we show that a new variant of the k-d tree search algorithm makes indexing in higher-dimensional spaces practical. This Best Bin First, or BBF, search is an approximate algorithm which finds the nearest neighbour for a large fraction of the queries, and a very close neighbour in the remaining cases. The technique has been integrated into a fully developed recognition system, which is able to detect complex objects in real, cluttered scenes in just a few seconds.},
address = {San Juan, Puerto Rico},
author = {Beis, Jeffrey S. and Lowe, David G.},
booktitle = {CVPR '97: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
doi = {10.1109/CVPR.1997.609451},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Shape Indexing Using Approximate Nearest-Neighbour Search in High-Dimensional Spaces - Beis, Lowe - 1998.pdf:pdf},
isbn = {0-8186-7822-4},
pages = {1000--1006},
publisher = {IEEE Computer Society},
title = {{Shape Indexing Using Approximate Nearest-Neighbour Search in High-Dimensional Spaces}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=609451},
year = {1997}
}
@inproceedings{Takacs2008,
abstract = {We have built an outdoors augmented reality system for mobile phones that matches camera-phone images against a large database of location-tagged images using a robust image retrieval algorithm. We avoid network latency by implementing the algorithm on the phone and deliver excellent performance by adapting a state-of-the-art image retrieval algorithm based on robust local descriptors. Matching is performed against a database of highly relevant features, which is continuously updated to reflect changes in the environment. We achieve fast updates and scalability by pruning of irrelevant features based on proximity to the user. By compressing and incrementally updating the features stored on the phone we make the system amenable to low-bandwidth wireless connections. We demonstrate system robustness on a dataset of location-tagged images and show a smart-phone implementation that achieves a high image matching rate while operating in near real-time.},
address = {Vancouver, British Columbia, Canada},
author = {Takacs, Gabriel and Chandrasekhar, Vijay and Gelfand, Natasha and Xiong, Yingen and Chen, Wei-Chao and Bismpigiannis, Thanos and Grzeszczuk, Radek and Pulli, Kari and Girod, Bernd},
booktitle = {MIR '08: Proceedings of the 1st ACM International Conference on Multimedia Information Retrieval},
doi = {10.1145/1460096.1460165},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Outdoors augmented reality on mobile phone using loxel-based visual feature organization - Takacs et al. - 2008.pdf:pdf},
isbn = {9781605583129},
keywords = {augmented reality,geo-referenced photos,image search,local search,mobile computing,photo collections},
pages = {427--434},
publisher = {ACM Press},
title = {{Outdoors augmented reality on mobile phone using loxel-based visual feature organization}},
url = {http://portal.acm.org/citation.cfm?doid=1460096.1460165},
year = {2008}
}
@inproceedings{Brown2005,
abstract = {This paper describes a novel multi-view matching framework based on a new type of invariant feature. Our features are located at Harris corners in discrete scale-space and oriented using a blurred local gradient. This defines a rotationally invariant frame in which we sample a feature descriptor, which consists of an 8 $\times$ 8 patch of bias/gain normalised intensity values. The density of features in the image is controlled using a novel adaptive non-maximal suppression algorithm, which gives a better spatial distribution of features than previous approaches. Matching is achieved using a fast nearest neighbour algorithm that indexes features based on their low frequency Haar wavelet coefficients. We also introduce a novel outlier rejection procedure that verifies a pairwise feature match based on a background distribution of incorrect feature matches. Feature matches are refined using RANSAC and used in an automatic 2D panorama stitcher that has been extensively tested on hundreds of sample inputs.},
author = {Brown, Matthew and Szeliski, Richard and Winder, Simon},
booktitle = {CVPR '05: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
doi = {10.1109/CVPR.2005.235},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Multi-Image Matching Using Multi-Scale Oriented Patches - Brown, Szeliski, Winder - 2005.pdf:pdf},
isbn = {0-7695-2372-2},
pages = {510--517},
publisher = {IEEE},
title = {{Multi-Image Matching Using Multi-Scale Oriented Patches}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1467310},
year = {2005}
}
@article{Wimo2010,
abstract = {Background: The purpose of this study was to update the previous estimate of the worldwide cost of dementia in 2005 to 2009. Methods: The cost model is based on prevalence estimates, country and region-specific data on Gross Domestic Product per person and average wage, with results from previously published cost-of-illness studies in different countries. Prevalence figures are updated to 2009 and costs were adjusted to 2009 constant US dollars (\$). Results: The total worldwide societal cost of dementia, based on a dementia population of 34.4 million demented persons, was estimated to \$422 billion in 2009, including \$142 billion for informal care (34\%). Conclusions: The worldwide cost of dementia has increased by 34\% (18\% in fixed prices) between 2005 and 2009.},
author = {Wimo, Anders and Winblad, Bengt and J\"{o}nsson, Linus},
doi = {10.1016/j.jalz.2010.01.010},
file = {:D$\backslash$:/\_Docs/mendeleyMain/The worldwide societal costs of dementia Estimates for 2009 - Wimo, Winblad, J\"{o}nsson - 2010.pdf:pdf},
issn = {1552-5279},
journal = {Alzheimer's \& Dementia: The Journal of the Alzheimer's Association},
keywords = {Africa,Alzheimer's disease,Asia,Caribbean,Central America,Costs,Economic impact,Economics,Europe,Global burden of disease,Informal care,North America,Oceania,Prevalence,South America},
month = mar,
number = {2},
pages = {98--103},
pmid = {20298969},
title = {{The worldwide societal costs of dementia: Estimates for 2009}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/20298969},
volume = {6},
year = {2010}
}
@inproceedings{Savitch2005,
abstract = {People with dementia have problems with memory, attention, language and orientation. Designers in the physical environment have started to consider the needs of people with dementia, but research into the use of computers by people with dementia has not been widespread. The large English-speaking Alzheimer's associations around the world are all committed to providing information for people with dementia via their websites. However, analysis of the websites indicates that the pages may not have been designed specifically for people with memory and language problems in mind. A small scale preliminary evaluation of four Alzheimer's association websites from across the world demonstrated that people with dementia can contribute to the design of websites. The location of the link from the home page to information specifically for people with dementia is of particular importance.},
address = {Las Vegas},
author = {Savitch, Nada and Zaphiris, Panayiotis},
booktitle = {HCI '05: Proceedings of the 11th International Conference on Human-Computer Interaction},
file = {:D$\backslash$:/\_Docs/mendeleyMain/An investigation into the accessibility of web-based information for people with dementia dementia - Savitch, Zaphiris - 2005.pdf:pdf},
pages = {1--10},
title = {{An investigation into the accessibility of web-based information for people with dementia}},
year = {2005}
}
@incollection{Gossow2011,
abstract = {SURF (Speeded Up Robust Features) is a detector and descriptor of local scale- and rotation-invariant image features. By using integral images for image convolutions it is faster to compute than other state-of-the-art algorithms, yet produces comparable or even better results by means of repeatability, distinctiveness and robustness. A library implementing SURF is provided by the authors. However, it is closed-source and thus not suited as a basis for further research. Several open source implementations of the algorithm exist, yet it is unclear how well they realize the original algorithm. We have evaluated different SURF implementations written in C++ and compared the results to the original implementation. We have found that some implementations produce up to 33\% lower repeatability and up to 44\% lower maximum recall than the original implementation, while the implementation provided with the software Pan-o-matic produced almost identical results. We have extended the Pan-o-matic implementation to use multi-threading, resulting in an up to 5.1 times faster computation on an 8-core machine. We describe our comparison criteria and our ideas that lead to the speed-up. Our software is put into the public domain.},
author = {Gossow, David and Decker, Peter and Paulus, Dietrich},
booktitle = {RoboCup 2010: Robot Soccer World Cup XIV},
doi = {10.1007/978-3-642-20217-9_15},
editor = {Ruiz-del-Solar, Javier and Chown, Eric and Ploeger, Paul G.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/An Evaluation of Open Source SURF Implementations - Gossow, Decker, Paulus - 2011.pdf:pdf},
pages = {169--179},
publisher = {Springer Berlin / Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{An Evaluation of Open Source SURF Implementations}},
url = {http://www.springerlink.com/content/fm25321663805536/fulltext.pdf},
volume = {6556},
year = {2011}
}
@techreport{Evans2009,
abstract = {In this document, the SURF detector-descriptor scheme used in the OpenSURF library is discussed in detail. First the algorithm is analysed from a theoretical standpoint to provide a detailed overview of how and why it works. Next the design and development choices for the implementation of the library are discussed and justified. During the implementation of the library, it was found that some of the finer details of the algorithm had been omitted or overlooked, so Section 1.5 serves to make clear the concepts which are not explicitly defined in the SURF paper [1].},
author = {Evans, Christopher},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Notes on the OpenSURF Library - Evans - 2009.pdf:pdf},
institution = {University of Bristol},
keywords = {Computer Vision},
number = {1},
pages = {25},
title = {{Notes on the OpenSURF Library}},
url = {http://opensurf1.googlecode.com/files/OpenSURF.pdf},
year = {2009}
}
@inproceedings{Taylor2009,
abstract = {In this paper we present a robust feature matching scheme in which features can be matched in 2.3 $\mu$s. For a typical task involving 150 features per image, this results in a processing time of 500 $\mu$s for feature extraction and matching. In order to achieve very fast matching we use simple features based on histograms of pixel intensities and an indexing scheme based on their joint distribution. The features are stored with a novel bit mask representation which requires only 44 bytes of memory per feature and allows computation of a dissimilarity score in 20 ns. A training phase gives the patch-based features invariance to small viewpoint variations. Larger viewpoint variations are handled by training entirely independent sets of features from different viewpoints. A complete system is presented where a database of around 13,000 features is used to robustly localise a single planar target in just over a millisecond, including all steps from feature detection to model fitting. The resulting system shows comparable robustness to SIFT and Ferns while using a tiny fraction of the processing time, and in the latter case a fraction of the memory as well.},
address = {Miami, FL},
author = {Taylor, Simon and Rosten, Edward and Drummond, Tom},
booktitle = {CVPRW '09: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops},
doi = {10.1109/CVPRW.2009.5204314},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Robust feature matching in 2.3µs - Taylor, Rosten, Drummond - 2009.pdf:pdf},
isbn = {978-1-4244-3994-2},
month = jun,
pages = {15--22},
publisher = {IEEE Computer Society},
title = {{Robust feature matching in 2.3$\mu$s}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5204314},
year = {2009}
}
@article{Wither2009,
abstract = {Annotation, the process of adding extra virtual information to an object, is one of the most common uses for augmented reality. Although annotation is widely used in augmented reality, there is no general agreed-upon definition of what precisely constitutes an annotation in this context. In this paper, we propose a taxonomy of annotation, describing what constitutes an annotation and outlining different dimensions along which annotation can vary. Using this taxonomy we also highlight what styles of annotation are used in different types of applications and areas where further work needs to be done to improve annotation. Through our taxonomy we found two primary categories into which annotations in current applications fall. Some annotations present information that is directly related to the object they are annotating, while others are only indirectly related to the object that is being annotated. We also found that there are very few applications that enable the user to either edit or create new annotations online. Instead, most applications rely on content that is created in various offline processes. There are, however, many advantages to online annotation. We summarize and formalize our recent work in this field by presenting the steps needed to build an online annotation system, looking most closely at techniques for placing annotations from a distance.},
author = {Wither, Jason and DiVerdi, Stephen and H\"{o}llerer, Tobias H.},
doi = {10.1016/j.cag.2009.06.001},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Annotation in outdoor augmented reality - Wither, DiVerdi, H\"{o}llerer - 2009.PDF:PDF},
issn = {0097-8493},
journal = {Computers \& Graphics},
keywords = {Annotation,Augmented reality,Online content creation,Taxonomy},
month = dec,
number = {6},
pages = {679--689},
title = {{Annotation in outdoor augmented reality}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0097849309000867},
volume = {33},
year = {2009}
}
@inproceedings{Cornelis2008,
abstract = {Ever since the introduction of freely programmable hardware components into modern graphics hardware, graphics processing units (GPUs) have become increasingly popular for general purpose computations. Especially when applied to computer vision algorithms where a Single set of Instructions has to be executed on Multiple Data (SIMD), GPU-based algorithms can provide a major increase in processing speed compared to their CPU counterparts. This paper presents methods that take full advantage of modern graphics card hardware for real-time scale invariant feature detection and matching. The focus lies on the extraction of feature locations and the generation of feature descriptors from natural images. The generation of these feature-vectors is based on the Speeded Up Robust Features (SURF) method [1] due to its high stability against rotation, scale and changes in lighting condition of the processed images. With the presented methods feature detection and matching can be performed at framerates exceeding 100 frames per second for 640 $\times$ 480 images. The remaining time can then be spent on fast matching against large feature databases on the GPU while the CPU can be used for other tasks.},
author = {Cornelis, Nico and {Van Gool}, Luc},
booktitle = {CVPRW '08: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops},
doi = {10.1109/CVPRW.2008.4563087},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Fast scale invariant feature detection and matching on programmable graphics hardware - Cornelis, Van Gool - 2008.pdf:pdf},
isbn = {978-1-4244-2339-2},
keywords = {GPU,SURF,feature extraction,feature matching},
month = jun,
pages = {1--8},
publisher = {IEEE Computer Society},
title = {{Fast scale invariant feature detection and matching on programmable graphics hardware}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4563087},
year = {2008}
}
@inproceedings{Yang2010,
abstract = {The proliferation of camera-equipped mobile devices with enhanced mobile computing power and network connectivity results in a rising demand for mobile image search. Although image search has been studied extensively over the last few decades, most existing solutions are based on and optimized for desktop and server platforms, not for mobile devices. In this paper, we address some of the challenging issues unique in mobile search scenarios and suggest a list of potential solutions. As a case study, we design a mobile landmark image search system to evaluate the effectiveness of some proposed solutions. To enhance the mobile search experience, we propose a multimodal search scheme which uses both image content and user location to increase search precision and thus minimize network usage. We also suggest a post-search result pruning method designed to match the most relevant results to a user's search interests. Experiments conducted on our Landmark-450 image dataset demonstrate that the proposed methods can significantly increase the relevance of selected results in response to mobile image search while also reducing the amount of data transferred across the network.},
address = {San Francisco, CA},
author = {Yang, Xin and Pang, Sydney and Cheng, K. T. Tim},
booktitle = {CVPRW '10: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops},
doi = {10.1109/CVPRW.2010.5543246},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Mobile image search with multimodal context-aware queries - Yang, Pang, Cheng - 2010.pdf:pdf},
isbn = {978-1-4244-7029-7},
month = jun,
pages = {25--32},
publisher = {IEEE Computer Society},
title = {{Mobile image search with multimodal context-aware queries}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5543246},
year = {2010}
}
@phdthesis{Wilson2005,
abstract = {As people grow older, they depend more heavily upon outside support for health assessment and medical care. The current healthcare infrastructure in America is widely considered to be inadequate to meet the needs of an increasingly older population. One solution, called aging in place, is to ensure that the elderly can live safely and independently in their own homes for as long as possible. Automatic health monitoring is a technological approach which helps people age in place by continuously providing key information to caregivers. In this thesis, we explore automatic health monitoring on several levels. First, we conduct a two-phased formative study to examine the work practices of professionals who currently perform in-home monitoring for elderly clients. With these findings in mind, we introduce the simultaneous tracking and activity recognition (STAR) problem, whose solution provides vital information for automatic in-home health monitoring. We describe and evaluate a particle filter approach that uses data from simple sensors commonly found in home security systems to provide room-level tracking and activity recognition. Next, we introduce the ``context-aware recognition survey,'' a novel data collection method that helps users label anonymous episodes of activity for use as training examples in a supervised learner. Finally, we introduce the k-Edits Viterbi algorithm, which works within a Bayesian framework to automatically rate routine activities and detect irregular patterns of behavior. This thesis contributes to the field of automatic health monitoring through a combination of intensive background study, efficient approaches for location and activity inference, a novel unsupervised data collection technique, and a practical activity rating application.},
author = {Wilson, Daniel H.},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Assistive Intelligent Environments for Automatic Health Monitoring - Wilson - 2005.pdf:pdf},
pages = {185},
school = {Carnegie Mellon University},
title = {{Assistive Intelligent Environments for Automatic Health Monitoring}},
year = {2005}
}
@inproceedings{Muja2009,
abstract = {For many computer vision problems, the most time consuming component consists of nearest neighbor matching in high-dimensional spaces. There are no known exact algorithms for solving these high-dimensional problems that are faster than linear search. Approximate algorithms are known to provide large speedups with only minor loss in accuracy, but many such algorithms have been published with only minimal guidance on selecting an algorithm and its parameters for any given problem. In this paper, we describe a system that answers the question, ``What is the fastest approximate nearest-neighbor algorithm for my data?'' Our system will take any given dataset and desired degree of precision and use these to automatically determine the best algorithm and parameter values. We also describe a new algorithm that applies priority search on hierarchical k-means trees, which we have found to provide the best known performance on many datasets. After testing a range of alternatives, we have found that multiple randomized k-d trees provide the best performance for other datasets. We are releasing public domain code that implements these approaches. This library provides about one order of magnitude improvement in query time over the best previously available software and provides fully automated parameter selection.},
address = {Lisbon, Portugal},
author = {Muja, Marius and Lowe, David G.},
booktitle = {VISAPP '09: Proceedings of the International Conference on Computer Vision Theory and Applications},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Fast approximate nearest neighbors with automatic algorithm configuration - Muja, Lowe - 2009.pdf:pdf},
keywords = {clustering,hierarchical k-means tree,nearest-neighbors search,randomized kd-trees},
pages = {331--340},
title = {{Fast approximate nearest neighbors with automatic algorithm configuration}},
url = {http://www.cs.ubc.ca/~lowe/papers/09muja.pdf},
year = {2009}
}
@inproceedings{Turcot2009,
abstract = {There has been recent progress on the problem of recognizing specific objects in very large datasets. The most common approach has been based on the bag-of-words (BOW) method, in which local image features are clustered into visual words. This can provide significant savings in memory compared to storing and matching each feature independently. In this paper we take an additional step to reducing memory requirements by selecting only a small subset of the training features to use for recognition. This is based on the observation that many local features are unreliable or represent irrelevant clutter. We are able to select ``useful'' features, which are both robust and distinctive, by an unsupervised preprocessing step that identifies correctly matching features among the training images. We demonstrate that this selection approach allows an average of 4\% of the original features per image to provide matching performance that is as accurate as the full set. In addition, we employ a graph to represent the matching relationships between images. Doing so enables us to effectively augment the feature set for each image through merging of useful features of neighboring images. We demonstrate adjacent and 2-adjacent augmentation, both of which give a substantial boost in performance.},
address = {Kyoto},
author = {Turcot, Panu and Lowe, David G.},
booktitle = {ICCVW '09: Proceedings of the 12th IEEE International Conference on Computer Vision Workshops},
doi = {10.1109/ICCVW.2009.5457541},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Better matching with fewer features The selection of useful features in large database recognition problems - Turcot, Lowe - 2009.pdf:pdf},
isbn = {978-1-4244-4442-7},
month = sep,
pages = {2109--2116},
publisher = {IEEE Computer Society},
title = {{Better matching with fewer features: The selection of useful features in large database recognition problems}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5457541},
year = {2009}
}
@inproceedings{Sargent2009,
abstract = {The ability to detect and match features across multiple views of a scene is a crucial first step in many computer vision algorithms for dynamic scene analysis. State-of-the-art methods such as SIFT and SURF perform successfully when applied to typical images taken by a digital camera or camcorder. However, these methods often fail to generate an acceptable number of features when applied to medical images, because such images usually contain large homogeneous regions with little color and intensity variation. As a result, tasks like image registration and 3D structure recovery become difficult or impossible in the medical domain. This paper presents a scale, rotation and color/illumination invariant feature detector and descriptor for medical applications. The method incorporates elements of SIFT and SURF while optimizing their performance on medical data. Based on experiments with various types of medical images, we combined, adjusted, and built on methods and parameter settings employed in both algorithms. An approximate Hessian based detector is used to locate scale invariant keypoints and a dominant orientation is assigned to each keypoint using a gradient orientation histogram, providing rotation invariance. Finally, keypoints are described with an orientation-normalized distribution of gradient responses at the assigned scale, and the feature vector is normalized for contrast invariance. Experiments show that the algorithm detects and matches far more features than SIFT and SURF on medical images, with similar error levels.},
author = {Sargent, Dusty and Chen, Chao-I and Tsai, Chang-Ming and Wang, Yuan-Fang and Koppel, Daniel},
booktitle = {Proceedings of the SPIE Medical Imaging Conference},
doi = {10.1117/12.811210},
editor = {Hsieh, Jiang and Samei, Ehsan},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Feature Detector and Descriptor for Medical Images - Sargent et al. - 2009.pdf:pdf},
keywords = {descriptor,detector,feature,keypoint},
publisher = {Society of Photo Optical},
series = {Proceedings of the SPIE},
title = {{Feature Detector and Descriptor for Medical Images}},
url = {http://link.aip.org/link/PSISDG/v7259/i1/p72592Z/s1\&Agg=doi},
volume = {7259},
year = {2009}
}
@inproceedings{Newman2006,
  author    = {Newman, Joseph and Schall, Gerhard and Barakonyi, Istv\'{a}n and Sch\"{u}rzinger, Andreas and Schmalstieg, Dieter},
  title     = {{Wide-Area Tracking Tools for Augmented Reality}},
  booktitle = {Advances in Pervasive Computing 2006},
  volume    = {207},
  pages     = {7--10},
  year      = {2006},
  address   = {Dublin},
  publisher = {Austrian Computer Society},
  isbn      = {3854032072},
  abstract  = {We have developed a hand-held augmented reality platform exploiting a combination of multiple sensors built around an ultra-wideband tracking system. We demonstrate two applications illustrating how an environment exploiting this platform can be set up. Firstly, a technician-support application provides intuitive in-situ instructions on how a wide area tracking system should be configured. The use of 3D registered graphics greatly assists in the debugging of common awkward use cases involving reflections off metal surfaces. Secondly, a navigation application utilises this newly configured and calibrated tracker, as well as other sensors, adapting to whatever is available in a given locale},
  file      = {:D$\backslash$:/\_Docs/mendeleyMain/Wide-Area Tracking Tools for Augmented Reality - Newman et al. - 2006.pdf:pdf},
}
@article{Rashid2006,
abstract = {RFID (Radio frequency identification) is often seen as an enabling technology for mixed-reality experiences where all kinds of objects, even the most mundane and inanimate, can be equipped to provide interaction between the real and virtual worlds. These mixed-reality experiences could occur in all aspects of our lives, but one of the most easily envisaged is that of computer games. As the mobile phone has become the computer carried in the pockets of a third of the population of the planet, it would seem a natural platform for these mixed-reality games. Further, the emergence of mobile phones that incorporate RFID readers gives the opportunity for creating games in which players interact with real physical objects, in real locations, and provides enhanced gameplay and experience. In this article we present details of a novel location- and object-enhanced mixed-reality version of the Namco arcade classic, Pacman. In particular, the article presents a comparison of the game to other mixed-reality versions of Pacman; the rationale behind specific design choices made during game design and its subsequent implementation; and an analysis of the experiences of people who have played the game. Our system highlights the possibilities via use of physical objects and the combination of mobile phones and RFID of yielding new mixed-reality entertainment experiences.},
author = {Rashid, Omer and Bamford, Will and Coulton, Paul and Edwards, Reuben and Scheible, Jurgen},
doi = {10.1145/1178418.1178425},
file = {:D$\backslash$:/\_Docs/mendeleyMain/PAC-LAN Mixed-Reality Gaming with RFID- Enabled Mobile Phones - Rashid, Bamford, Coulton - 2006.pdf:pdf},
issn = {1544-3574},
journal = {Computers in Entertainment},
month = oct,
number = {4},
pages = {1--17},
title = {{PAC-LAN: mixed-reality gaming with RFID-enabled mobile phones}},
url = {http://portal.acm.org/citation.cfm?doid=1178418.1178425},
volume = {4},
year = {2006}
}
@inproceedings{Chawathe2008,
  author    = {Chawathe, Sudarshan S.},
  title     = {{Beacon Placement for Indoor Localization using Bluetooth}},
  booktitle = {ITSC '08: Proceedings of the 11th International IEEE Conference on Intelligent Transportation Systems},
  pages     = {980--985},
  month     = oct,
  year      = {2008},
  address   = {Beijing},
  publisher = {IEEE Computer Society},
  isbn      = {978-1-4244-2111-4},
  doi       = {10.1109/ITSC.2008.4732690},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4732690},
  abstract  = {We describe a method for determining the location of a mobile device, such as a handheld computer or mobile phone, in an indoor environment using Bluetooth beacons. Since it uses inexpensive commodity devices, this method is inexpensive to deploy. The limited range of Bluetooth reception is used to advantage. Another important advantage of this method is that it allows the mobile device to determine its location while remaining anonymous, unidentified to the beacons or other nearby devices. In such a deployment, an important design task is the placement of beacons. Signal propagation in indoor environments is complex, affected by factors such as floor-plans and duct-work, varying transmission and reflection properties of building materials and furniture, and interference from other devices. Therefore, the area from which a beacon is visible is very irregular and not well approximated by simple models such as ellipsoids. Our solution permits complex reception characteristics to be accurately modeled and provides a simple method for choosing beacon locations.},
  file      = {:D$\backslash$:/\_Docs/mendeleyMain/Beacon Placement for Indoor Localization using Bluetooth - Chawathe - 2008.pdf:pdf},
}
@inproceedings{Wagner2009a,
  author    = {Wagner, Daniel and Schmalstieg, Dieter and Bischof, Horst},
  title     = {{Multiple target detection and tracking with guaranteed framerates on mobile phones}},
  booktitle = {ISMAR '09: Proceedings of the 8th IEEE International Symposium on Mixed and Augmented Reality},
  pages     = {57--64},
  month     = oct,
  year      = {2009},
  address   = {Orlando, FL},
  publisher = {IEEE Computer Society},
  isbn      = {978-1-4244-5390-0},
  doi       = {10.1109/ISMAR.2009.5336497},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5336497},
  abstract  = {In this paper we present a novel method for real-time pose estimation and tracking on low-end devices such as mobile phones. The presented system can track multiple known targets in real-time and simultaneously detect new targets for tracking. We present a method to automatically and dynamically balance the quality of detection and tracking to adapt to a variable time budget and ensure a constant frame rate. Results from real data of a mobile phone Augmented Reality system demonstrate the efficiency and robustness of the described approach. The system can track 6 planar targets on a mobile phone simultaneously at framerates of 23 fps.},
  file      = {:D$\backslash$:/\_Docs/mendeleyMain/Multiple target detection and tracking with guaranteed framerates on mobile phones - Wagner, Schmalstieg, Bischof - 2009.pdf:pdf},
}
@inproceedings{Santos,
abstract = {In this paper we introduce an innovative application designed to make collaborative design review in the architectural and automotive domain more effective. For this purpose we present a system architecture which combines variety of visualization displays such as high resolution multitile displays, TabletPCs and head-mounted displays with innovative 2D and 3D Interaction Paradigms to better support collaborative mobile mixed reality design reviews. Our research and development is motivated by two use scenarios: automotive and architectural design review involving real users from Page$\backslash$Park architects and FIAT Elasis. Our activities are supported by the EU IST project IMPROVE aimed at developing advanced display techniques, fostering activities in the areas of: optical see-through HMD development using unique OLED technology, marker-less optical tracking, mixed reality rendering, image calibration for large tiled displays, collaborative tablet-based and projection wall oriented interaction and stereoscopic video streaming for mobile users. The paper gives an overview of the hardware and software developments within IMPROVE and concludes with results from first user tests.},
author = {Santos, Pedro and Stork, Andr\'{e} and Gierlinger, Thomas and Pagani, Alain and Ara\'{u}jo, Bruno and Jota, Ricardo and Bruno, Luis and Jorge, Joaquim and Pereira, Joao Madeiras and Witzel, Martin and Conti, Giuseppe and De Amicis, Raffaele and Paloc, C\'{e}line and Machui, Oliver and Jim\'{e}nez, Jose M. and Bodammer, Georg and McIntyre, Don},
booktitle = {ICVR '07: Proceedings of the 2nd International Conference on Virtual Reality},
file = {:D$\backslash$:/\_Docs/mendeleyMain/IMPROVE Collaborative Design Review in Mobile Mixed Reality - Santos et al. - 2007.pdf:pdf},
pages = {543--553},
publisher = {Springer-Verlag},
title = {{IMPROVE: Collaborative Design Review in Mobile Mixed Reality}},
year = {2007}
}
@inproceedings{Arth2009,
abstract = {We present a fast and memory efficient method for localizing a mobile user's 6DOF pose from a single camera image. Our approach registers a view with respect to a sparse 3D point reconstruction. The 3D point dataset is partitioned into pieces based on visibility constraints and occlusion culling, making it scalable and efficient to handle. Starting with a coarse guess, our system only considers features that can be seen from the user's position. Our method is resource efficient, usually requiring only a few megabytes of memory, thereby making it feasible to run on low-end devices such as mobile phones. At the same time it is fast enough to give instant results on this device class.},
address = {Orlando, FL},
author = {Arth, Clemens and Wagner, Daniel and Klopschitz, Manfred and Irschara, Arnold and Schmalstieg, Dieter},
booktitle = {ISMAR '09: Proceedings of the 8th IEEE International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2009.5336494},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Wide area localization on mobile phones - Arth et al. - 2009.pdf:pdf},
isbn = {978-1-4244-5390-0},
month = oct,
pages = {73--82},
publisher = {IEEE Computer Society},
title = {{Wide area localization on mobile phones}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5336494},
year = {2009}
}
@article{Mulloni2009,
  author   = {Mulloni, Alessandro and Wagner, Daniel and Barakonyi, Istv\'{a}n and Schmalstieg, Dieter},
  title    = {{Indoor Positioning and Navigation with Camera Phones}},
  journal  = {IEEE Pervasive Computing},
  volume   = {8},
  number   = {2},
  pages    = {22--31},
  month    = apr,
  year     = {2009},
  issn     = {1536-1268},
  doi      = {10.1109/MPRV.2009.30},
  url      = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4814934},
  abstract = {This low-cost indoor navigation system runs on off-the-shelf camera phones. More than 2,000 users at four different large-scale events have already used it. The system uses built-in cameras to determine user location in real time by detecting unobtrusive fiduciary markers. The required infrastructure is limited to paper markers and static digital maps, and common devices are used, facilitating quick deployment in new environments. The authors have studied the application quantitatively in a controlled environment and qualitatively during deployment at four large international events. According to test users, marker-based navigation is easier to use than conventional mobile digital maps. Moreover, the users' location awareness in navigation tasks improved. Experiences drawn from questionnaires, usage log data, and user interviews further highlight the benefits of this approach.},
  file     = {:D$\backslash$:/\_Docs/mendeleyMain/Indoor Positioning and Navigation with Camera Phones - Mulloni et al. - 2009.pdf:pdf},
}
@article{Lowe2004,
  author  = {Lowe, David G.},
  title   = {{Distinctive Image Features from Scale-Invariant Keypoints}},
  journal = {International Journal of Computer Vision},
  volume  = {60},
  number  = {2},
  pages   = {91--110},
  month   = nov,
  year    = {2004},
  issn    = {0920-5691},
  doi     = {10.1023/B:VISI.0000029664.99615.94},
  url     = {http://www.springerlink.com/openurl.asp?id=doi:10.1023/B:VISI.0000029664.99615.94},
  file    = {:D$\backslash$:/\_Docs/mendeleyMain/Distinctive Image Features from Scale-Invariant Keypoints - Lowe - 2004.pdf:pdf},
}
@inproceedings{Bell2001,
abstract = {We describe a view-management component for interactive 3D user interfaces. By view management, we mean maintaining visual constraints on the projections of objects on the view plane, such as locating related objects near each other, or preventing objects from occluding each other. Our view-management component accomplishes this by modifying selected object properties, including position, size, and transparency, which are tagged to indicate their constraints. For example, some objects may have geometric properties that are determined entirely by a physical simulation and which cannot be modified, while other objects may be annotations whose position and size are flexible.We introduce algorithms that use upright rectangular extents to represent on the view plane a dynamic and efficient approximation of the occupied space containing the projections of visible portions of 3D objects, as well as the unoccupied space in which objects can be placed to avoid occlusion. Layout decisions from previous frames are taken into account to reduce visual discontinuities. We present augmented reality and virtual reality examples to which we have applied our approach, including a dynamically labeled and annotated environment.},
address = {New York, New York, USA},
author = {Bell, Blaine and Feiner, Steven K. and H\"{o}llerer, Tobias H.},
booktitle = {UIST '01: Proceedings of the 14th annual ACM Symposium on User Interface Software and Technology},
doi = {10.1145/502348.502363},
file = {:D$\backslash$:/\_Docs/mendeleyMain/View management for virtual and augmented reality - Bell, Feiner, H\"{o}llerer - 2001.pdf:pdf},
isbn = {158113438X},
pages = {101--110},
publisher = {ACM Press},
title = {{View management for virtual and augmented reality}},
url = {http://portal.acm.org/citation.cfm?doid=502348.502363},
year = {2001}
}
@phdthesis{MacWilliams2004,
  author = {MacWilliams, Asa},
  title  = {{A Decentralized Adaptive Architecture for Ubiquitous Augmented Reality Systems}},
  school = {Technische Universit\"{a}t M\"{u}nchen},
  pages  = {208},
  year   = {2004},
  file   = {:D$\backslash$:/\_Docs/mendeleyMain/A Decentralized Adaptive Architecture for Ubiquitous Augmented Reality Systems - MacWilliams - 2004.pdf:pdf},
}
@incollection{Hollerer2004,
  author    = {H\"{o}llerer, Tobias H. and Feiner, Steven K.},
  title     = {{Mobile Augmented Reality}},
  booktitle = {Telegeoinformatics: Location-Based Computing and Services},
  editor    = {Karimi, Hassan A. and Hammad, Amin},
  pages     = {1--39},
  year      = {2004},
  address   = {London, UK},
  publisher = {Taylor and Francis Books Ltd.},
  isbn      = {978-0415369763},
  url       = {http://www.cs.ucsb.edu/~holl/pubs/hollerer-2004-tandf.pdf},
  file      = {:D$\backslash$:/\_Docs/mendeleyMain/Mobile Augmented Reality - H\"{o}llerer, Feiner - 2004.pdf:pdf},
}
@inproceedings{Dow2005,
  author    = {Dow, Steven and Lee, Jaemin and Oezbek, Christopher and MacIntyre, Blair and Bolter, Jay David and Gandy, Maribeth},
  title     = {{Exploring spatial narratives and mixed reality experiences in Oakland Cemetery}},
  booktitle = {ACE '05: Proceedings of the 2005 ACM SIGCHI International Conference on Advances in Computer Entertainment Technology},
  pages     = {51--60},
  year      = {2005},
  address   = {New York, New York, USA},
  publisher = {ACM Press},
  isbn      = {1595931104},
  doi       = {10.1145/1178477.1178484},
  url       = {http://portal.acm.org/citation.cfm?doid=1178477.1178484},
  abstract  = {The Historic Oakland Cemetery in downtown Atlanta provides a unique setting for exploring the challenges of location-based mixed-reality experience design. Our objective is to entertain and educate visitors about historically and culturally significant events related to the deceased inhabitants of the cemetery. We worked with the constraints and affordances of the physical environment of the cemetery to design an audio-based dramatic experience. The dramatic narrative is realized through voice actors who play the parts of cemetery residents and tell stories about the time periods in which they lived. The experience provides navigation and linearity through a main narrator who guides visitors to various gravesites. While at each grave, the visitor can choose from several categories of content using a handheld controller. Formative evaluations conducted with users in the cemetery indicate strengths of the current experience and suggest ideas for continued development.},
  file      = {:D$\backslash$:/\_Docs/mendeleyMain/Exploring spatial narratives and mixed reality experiences in Oakland Cemetery - Dow et al. - 2005.pdf:pdf},
}
@incollection{Feiner1999,
author = {Feiner, Steven K. and MacIntyre, Blair and H\"{o}llerer, Tobias H.},
booktitle = {Mixed Reality: Merging Real and Virtual Worlds},
chapter = {20},
editor = {Ohta, Yuichi and Tamura, Hideyuki},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Wearing It Out First Steps Toward Mobile Augmented Reality Systems - Feiner, MacIntyre, H\"{o}llerer - 1999.pdf:pdf},
pages = {363--377},
publisher = {Springer-Verlag},
title = {{Wearing It Out: First Steps Toward Mobile Augmented Reality Systems}},
year = {1999}
}
@inproceedings{Schwerdtfeger2008,
abstract = {We report on recent progress in the iterative process of exploring, evaluating and refining Augmented Reality-based methods to support the order picking process. We present our findings from three user studies and from demonstrations at several exhibitions. The resulting setup is a combined visualization to precisely and efficiently guide the user, even if the augmentation is not always in the field of view of the HMD.},
address = {Cambridge, UK},
author = {Schwerdtfeger, Bj\"{o}rn and Klinker, Gudrun},
booktitle = {ISMAR '08: Proceedings of the 7th IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2008.4637331},
keywords = {Augmented Reality,Multimedia Information Systems,User Interface Evaluations,User-centered Design},
pages = {91--94},
publisher = {IEEE Computer Society},
title = {{Supporting Order Picking with Augmented Reality}},
year = {2008}
}
@article{Vlahakis2002,
  author   = {Vlahakis, Vassilios and Ioannidis, Nikolaos and Karigiannis, John and Tsotros, Manolis and Gounaris, Michael and Stricker, Didier and Gleue, Tim and Daehne, Patrick and Almeida, Lu\'{\i}s},
  title    = {{Archeoguide: an augmented reality guide for archaeological sites}},
  journal  = {IEEE Computer Graphics and Applications},
  volume   = {22},
  number   = {5},
  pages    = {52--60},
  month    = sep,
  year     = {2002},
  issn     = {0272-1716},
  doi      = {10.1109/MCG.2002.1028726},
  url      = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1028726},
  abstract = {The paper discusses Archeoguide which offers personalized augmented reality tours of archaeological sites. It uses outdoor tracking, mobile computing, 3D visualization and augmented reality techniques to enhance information presentation, reconstruct ruined sites, and simulate ancient life},
  file     = {:D$\backslash$:/\_Docs/mendeleyMain/Archeoguide an augmented reality guide for archaeological sites - Vlahakis et al. - 2002.pdf:pdf},
}
@inproceedings{Dow2007,
abstract = {In this paper we present the results of a qualitative, empirical study exploring the impact of immersive technologies on presence and engagement, using the interactive drama Fa\c{c}ade as the object of study. In this drama, players are situated in a married couple's apartment, and interact primarily through conversation with the characters and manipulation of objects in the space. We present participants' experiences across three different versions of Fa\c{c}ade -- augmented reality (AR) and two desktop computing based implementations, one where players communicate using speech and the other using typed keyboard input. Through interviews and observations of players, we find that immersive AR can create an increased sense of presence, confirming generally held expectations. However, we demonstrate that increased presence does not necessarily lead to more engagement. Rather, mediation may be necessary for some players to fully engage with certain interactive media experiences.},
address = {New York, New York, USA},
author = {Dow, Steven and Mehta, Manish and Harmon, Ellie and MacIntyre, Blair and Mateas, Michael},
booktitle = {CHI '07: Proceedings of the 26th SIGCHI Conference on Human Factors in Computing Systems},
doi = {10.1145/1240624.1240847},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Presence and engagement in an interactive drama - Dow et al. - 2007.pdf:pdf},
isbn = {9781595935939},
pages = {1475--1484},
publisher = {ACM Press},
title = {{Presence and engagement in an interactive drama}},
url = {http://portal.acm.org/citation.cfm?doid=1240624.1240847},
year = {2007}
}
@article{Thomas2002,
abstract = {This paper presents a first person outdoor/indoor augmented reality application ARQuake that we have developed. ARQuake is an extension of the desktop game Quake, and as such we are investigating how to convert a desktop first person application into an outdoor/indoor mobile augmented reality application. We present an architecture for a low cost, moderately accurate six degrees of freedom tracking system based on GPS, digital compass, and fiducial vision-based tracking. Usability issues such as monster selection, colour, input devices, and multi-person collaboration are discussed.},
author = {Thomas, Bruce H. and Close, Ben and Donoghue, John and Squires, John and De Bondi, Phillip and Piekarski, Wayne},
doi = {10.1007/s007790200007},
file = {:D$\backslash$:/\_Docs/mendeleyMain/First Person IndoorOutdoor Augmented Reality Application ARQuake - Thomas et al. - 2002.pdf:pdf},
issn = {1617-4909},
journal = {Personal and Ubiquitous Computing},
keywords = {augmented reality,computer games,wearable computers},
month = feb,
number = {1},
pages = {75--86},
title = {{First Person Indoor/Outdoor Augmented Reality Application: ARQuake}},
url = {http://www.springerlink.com/openurl.asp?genre=article\&id=doi:10.1007/s007790200007},
volume = {6},
year = {2002}
}
@inproceedings{Feiner1997,
abstract = {We describe a prototype system that combines together the overlaid 3D graphics of augmented reality with the untethered freedom of mobile computing. The goal is to explore how these two technologies might together make possible wearable computer systems that can support users in their everyday interactions with the world. We introduce an application that presents information about our university's campus, using a head-tracked, see-through, head-worn, 3D display, and an untracked, opaque, handheld, 2D display with stylus and trackpad. We provide an illustrated explanation of how our prototype is used, and describe our rationale behind designing its software infrastructure and selecting the hardware on which it runs.},
address = {Cambridge, MA, USA},
author = {Feiner, Steven K. and MacIntyre, Blair and H\"{o}llerer, Tobias H. and Webster, Anthony},
booktitle = {ISWC '97: Proceedings of the 1st International Symposium on Wearable Computers},
doi = {10.1109/ISWC.1997.629922},
file = {:D$\backslash$:/\_Docs/mendeleyMain/A touring machine prototyping 3D mobile augmented reality systems for exploring the urban environment - Feiner et al. - 1997.pdf:pdf},
isbn = {0-8186-8192-6},
pages = {74--81},
publisher = {IEEE Computer Society},
title = {{A touring machine: prototyping 3D mobile augmented reality systems for exploring the urban environment}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=629922},
year = {1997}
}
@inproceedings{Schmalstieg2007,
  author    = {Schmalstieg, Dieter and Wagner, Daniel},
  title     = {{Experiences with Handheld Augmented Reality}},
  booktitle = {ISMAR '07: Proceedings of the 6th IEEE and ACM International Symposium on Mixed and Augmented Reality},
  pages     = {3--18},
  month     = nov,
  year      = {2007},
  address   = {Nara, Japan},
  publisher = {IEEE Computer Society},
  isbn      = {978-1-4244-1749-0},
  doi       = {10.1109/ISMAR.2007.4538819},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4538819},
  keywords  = {augmented reality games,cultural heritage,mobile augmented reality,wearable computing},
  abstract  = {In this paper, we present Studierstube ES, a framework for the development of handheld Augmented Reality. The applications run self-contained on handheld computers and smartphones with Windows CE. A detailed description of the performance critical tracking and rendering components are given. We also report on the implementation of a client-server architecture for multi-user applications, and a game engine for location based museum games that has been built on top of this infrastructure. Details on two games that were created, permanently deployed and evaluated in two Austrian museums illustrate the practical value of the framework and lessons learned from using it.},
  file      = {:D$\backslash$:/\_Docs/mendeleyMain/Experiences with Handheld Augmented Reality - Schmalstieg, Wagner - 2007.pdf:pdf},
}
@inproceedings{Eaddy2004,
  author    = {Eaddy, Marc and Blasko, G\'{a}bor and Babcock, Jason and Feiner, Steven K.},
  title     = {{My Own Private Kiosk: Privacy-Preserving Public Displays}},
  booktitle = {ISWC '04: Proceedings of the 8th International Symposium on Wearable Computers},
  pages     = {132--135},
  year      = {2004},
  publisher = {IEEE Computer Society},
  isbn      = {0-7695-2186-X},
  doi       = {10.1109/ISWC.2004.32},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1364701},
  keywords  = {audio augmented reality,eye tracking,gaze steering,gaze-contingent,privacy,public display,rochester institute of technology,visual perception laboratory},
  abstract  = {Ubiquitous, high-resolution, large public displays offer an attractive complement to wearable displays. Unfortunately, the inherently public nature of these public displays makes them unsuitable for displaying sensitive information. We present EyeGuide, a wearable system that allows the user to obtain information quickly from a public display without sacrificing privacy. To this end, EyeGuide employs a lightweight head-worn eye-tracker for hands-free object selection and an earphone for private communication. Our system supports public displays that are dynamic (e.g., a large plasma screen) and static (e.g., a large printed map). In our printed map scenario, EyeGuide whispers verbal directions via earphone to a user, based on where they are looking on the map. Using a technique we call "gaze steering," the system guides the user's eye position to specific locations. In our dynamic public display scenarios, EyeGuide presents documents (e.g., maps) that contain sensitive data in a way that preserves privacy.},
  file      = {:D$\backslash$:/\_Docs/mendeleyMain/My Own Private Kiosk Privacy-Preserving Public Displays - Eaddy et al. - 2004.pdf:pdf},
}
@inproceedings{Lang2002,
  author    = {Lang, Peter and Kusej, Albert and Pinz, Axel and Brasseur, Georg},
  title     = {{Inertial Tracking for Mobile Augmented Reality}},
  booktitle = {IMTC '02: Proceedings of the 19th IEEE Instrumentation and Measurement Technology Conference},
  pages     = {1583--1587},
  year      = {2002},
  publisher = {IEEE},
  isbn      = {0-7803-7218-2},
  doi       = {10.1109/IMTC.2002.1007196},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1007196},
  keywords  = {6 dof tracking,augmented reality,inertial sensors,real time tracking},
  abstract  = {Augmented Reality applications require the tracking of moving objects in real-time. Tracking is defined as the measurement of object position and orientation in a scene coordinate system. We present a new combination of silicon micromachined accelerometers and gyroscopes which have been assembled into a six degree of freedom (6 DoF) inertial tracking system. This inertial tracker is used in combination with a vision-based tracking system which will enable us to build affordable, light-weight, fully mobile tracking systems for Augmented Reality applications in the future.},
  file      = {:D$\backslash$:/\_Docs/mendeleyMain/Inertial Tracking for Mobile Augmented Reality - Lang et al. - 2002.pdf:pdf},
}
@inproceedings{Pressigout2006,
abstract = {Augmented Reality (AR) aims to fuse a virtual world and a real one in an image stream. When considering only a vision sensor, it relies on registration techniques that have to be accurate and fast enough for on-line augmentation. This paper proposes a real-time, robust and efficient 3D model-based tracking algorithm monocular vision system. A virtual visual servoing approach is used to estimate the pose between the camera and the object. The integration of texture information in the classical non-linear edge-based pose computation provides a more reliable tracker. Several illumination models have been considered and compared to better deal with the illumination change in the scene. The method presented in this paper has been validated on several video sequences for augmented reality applications.},
author = {Pressigout, Muriel and Marchand, \'{E}ric},
booktitle = {ISMAR '06: Proceedings of the 5th IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2006.297794},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Hybrid tracking algorithms for planar and non-planar structures subject to illumination changes - Pressigout, Marchand - 2006.pdf:pdf},
isbn = {1-4244-0650-1},
month = oct,
pages = {52--55},
publisher = {IEEE Computer Society},
title = {{Hybrid tracking algorithms for planar and non-planar structures subject to illumination changes}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4079256},
year = {2006}
}
@inproceedings{Sziebig2009,
abstract = {Virtual Environments allow users to interact with virtual worlds, but usually these interactions are in front of a monitor or a projected wall. Augmented Reality brings the feeling of reality to these interactions. The user can combine the real world with the virtual world. Interact and feel the objects, together with artefacts that are generated by a computer. This article provides an overview and introduction to the problems of Augmented Reality systems and shows the state of the art in current technological developments. The usability bottlenecks of existing systems and probable solutions are also discussed.},
address = {Budapest, Hungary},
author = {Sziebig, Gabor},
booktitle = {SMO '09: Proceedings of the 9th WSEAS international conference on Simulation, Modelling and Optimization},
editor = {Rudas, Imre and Demiralp, Metin and Mastorakis, Nikos},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Achieving total immersion Technology trends behind Augmented Reality - A survey - Sziebig - 2009.pdf:pdf},
keywords = {Augmented Reality,Head-mounted displays,Immersion,Trackers,Usability},
pages = {458--463},
publisher = {World Scientific and Engineering Academy and Society (WSEAS)},
title = {{Achieving total immersion: Technology trends behind Augmented Reality - A survey}},
year = {2009}
}
@inproceedings{Kato2000,
abstract = {We address the problems of virtual object interaction and user tracking in a table-top augmented reality (AR) interface. In this setting there is a need for very accurate tracking and registration techniques and an intuitive and useful interface. This is especially true in AR interfaces for supporting face to face collaboration where users need to be able to easily cooperate with each other. We describe an accurate vision-based tracking method for table-top AR environments and tangible user interface (TUI) techniques based on this method that allow users to manipulate virtual objects in a natural and intuitive manner. Our approach is robust, allowing users to cover some of the tracking markers while still returning camera viewpoint information, overcoming one of the limitations of traditional computer vision based systems. After describing this technique we describe its use in prototype AR applications},
address = {Munich, Germany},
author = {Kato, Hirokazu and Billinghurst, Mark and Poupyrev, Ivan and Imamoto, Kenji and Tachibana, Keihachiro},
booktitle = {ISAR '00: Proceedings of the IEEE and ACM International Symposium on Augmented Reality},
doi = {10.1109/ISAR.2000.880934},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Virtual object manipulation on a table-top AR environment - Kato et al. - 2000.pdf:pdf},
isbn = {0-7695-0846-4},
pages = {111--119},
publisher = {IEEE Computer Society},
title = {{Virtual object manipulation on a table-top AR environment}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=880934},
year = {2000}
}
@incollection{Billinghurst2009,
abstract = {Ambient Intelligence has the goal of embedding context-sensitive technology that disappears into the user’s surroundings. In many ways Augmented Reality (AR) is complementary to this in that AR interfaces seamlessly enhance the user’s real environment with virtual information overlay. The two merge together in context aware Ambient AR applications, which allow users to easily perceive and interact with Ambient AR Interfaces by using AR overlay of the real world. In this chapter we describe how Tangible Interaction techniques can be used to design Human Centric Interfaces for Ambient AR applications. Tangible interaction techniques are those that use real world object manipulation. Users already know how to handle real world objects and so very intuitive interfaces can be developed by building interaction techniques around object manipulation. Examples will be drawn from current and previous research in Augmented Reality and Ambient Interfaces, and design guidelines will be given to show how Human Centric tangible AR interfaces can be developed.},
author = {Billinghurst, Mark and Grasset, Rapha\"{e}l and Seichter, Hartmut},
booktitle = {Human-Centric Interfaces for Ambient Intelligence},
chapter = {11},
editor = {Aghajan, Hamid and Augusto, Juan Carlos and Delgado, Ramon Lopez-Cozar},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Tangible Interfaces for Ambient Augmented Reality Applications - Billinghurst, Grasset, Seichter - 2009.pdf:pdf},
isbn = {978-0-12-374708-2},
keywords = {Ambient Intelligence,Augmented Reality,Interaction Design,Tangible User Interfaces},
pages = {281--298},
publisher = {Academic Press},
title = {{Tangible Interfaces for Ambient Augmented Reality Applications}},
url = {http://www.hitlabnz.org/administrator/components/com\_jresearch/files/publications/2009-Tangible\_Interfaces\_for\_Ambient\_Augmented.pdf},
year = {2009}
}
@inproceedings{Hagbi2009,
abstract = {In this paper we present Nestor, a system for real-time recognition and camera pose estimation from planar shapes. The system allows shapes that carry contextual meanings for humans to be used as augmented reality (AR) tracking fiducials. The user can teach the system new shapes at runtime by showing them to the camera. The learned shapes are then maintained by the system in a shape library. Nestor performs shape recognition by analyzing contour structures and generating projective invariant signatures from their concavities. The concavities are further used to extract features for pose estimation and tracking. Pose refinement is carried out by minimizing the reprojection error between sample points on each image contour and its library counterpart. Sample points are matched by evolving an active contour in real time. Our experiments show that the system provides stable and accurate registration, and runs at interactive frame rates on a Nokia N95 mobile phone.},
address = {Orlando, FL},
author = {Hagbi, Nate and Bergig, Oriel and El-Sana, Jihad and Billinghurst, Mark},
booktitle = {ISMAR '09: Proceedings of the 8th IEEE International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2009.5336498},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Shape recognition and pose estimation for mobile augmented reality - Hagbi et al. - 2009.pdf:pdf},
isbn = {978-1-4244-5390-0},
issn = {1077-2626},
keywords = {3D pose estimation,In-Place Augmented Reality,free-hand sketching,geometric projective invariance,handheld AR,shape dual perception,shape recognition,vision-based tracking},
month = oct,
pages = {65--71},
pmid = {21041876},
publisher = {IEEE Computer Society},
title = {{Shape recognition and pose estimation for mobile augmented reality}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5336498},
year = {2009}
}
@inproceedings{Hile2007,
abstract = {Increasingly, cell phones are used to browse for information while location systems assist in gathering information that is most appropriate to the user's current location. We seek to take this one step further and actually overlay information on to the physical world using the cell phone's camera and thereby minimize a user's cognitive effort. This "magic lens" approach has many applications of which we are exploring two: indoor building navigation and dynamic directory assistance. In essence, we match "landmarks" identified in the camera image with those stored in a building database. We use two different types of features - floor corners that can be matched against a floorplan and SIFT features that can be matched to a database constructed from other images. The camera's pose can be determined exactly from a match and information can be properly aligned so that it can overlay directly onto the phone's image display. In this paper, we present early results that demonstrate it is possible to realize this capability for a variety of indoor environments. Latency is shown to already be reasonable and likely to be improved by further optimizations. Our goal is to further explore the computational tradeoff between the server and phone client so as to achieve an acceptable latency of a few seconds.},
address = {Berlin, Heidelberg},
author = {Hile, Harlan and Borriello, Gaetano},
booktitle = {LoCA '07: Proceedings of the 3rd International Conference on Location- and Context-Awareness},
doi = {10.1007/978-3-540-75160-1\_5},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Information Overlay for Camera Phones in Indoor Environments - Hile, Borriello - 2007.pdf:pdf},
pages = {68--84},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
title = {{Information Overlay for Camera Phones in Indoor Environments}},
volume = {4718},
year = {2007}
}
@inproceedings{Sauer2001,
abstract = {We developed an augmented reality system targeting image guidance for surgical procedures. The surgeon wears a video-see-through head mounted display that provides him with a stereo video view of the patient. The live video images are augmented with graphical representations of anatomical structures that are segmented from medical image data. The surgeon can see e.g. a tumor in its actual location inside the patient. This in-situ visualization, where the computer maps the image information onto the patient, promises the most direct, intuitive guidance for surgical procedures. In this paper, we discuss technical details of the system and describe a first pre-clinical evaluation. This first evaluation is very positive and encourages us to get our system ready for installation in UCLA’s iMRI operating room to perform clinical trials.},
author = {Sauer, F. and Khamene, A. and Bascle, B. and Rubino, G. J.},
booktitle = {MICCAI '01: Proceedings of the Medical Image Computing and Computer-Assisted Intervention},
doi = {10.1007/3-540-45468-3\_85},
editor = {Niessen, Wiro and Viergever, Max},
file = {:D$\backslash$:/\_Docs/mendeleyMain/A Head-Mounted Display System for Augmented Reality Image Guidance Towards Clinical Evaluation for iMRI-guided Nuerosurgery - Sauer et al. - 2001.pdf:pdf},
pages = {707--716},
publisher = {Springer Berlin / Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{A Head-Mounted Display System for Augmented Reality Image Guidance: Towards Clinical Evaluation for iMRI-guided Neurosurgery}},
url = {http://dx.doi.org/10.1007/3-540-45468-3\_85},
volume = {2208},
year = {2001}
}
@inproceedings{Kato1999,
abstract = {We describe an augmented reality conferencing system which uses the overlay of virtual images on the real world. Remote collaborators are represented on Virtual Monitors which can be freely positioned about a user in space. Users can collaboratively view and interact with virtual objects using a shared virtual whiteboard. This is possible through precise virtual image registration using fast and accurate computer vision techniques and HMD calibration. We propose a method for tracking fiducial markers and a calibration method for optical see-through HMD based on the marker tracking.},
address = {San Francisco, CA},
author = {Kato, Hirokazu and Billinghurst, Mark},
booktitle = {IWAR '99: Proceedings of the 2nd IEEE and ACM International Workshop on Augmented Reality},
doi = {10.1109/IWAR.1999.803809},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Marker Tracking and HMD Calibration for a Video-based Augmented Reality Conferencing System - Kato, Billinghurst - 1999.pdf:pdf},
isbn = {0-7695-0359-4},
keywords = {Augmented Reality,CSCW,HMD Calibration,Optical See-Through AR,Teleconferencing},
pages = {85--94},
publisher = {IEEE Computer Society},
title = {{Marker Tracking and HMD Calibration for a Video-based Augmented Reality Conferencing System}},
url = {http://www.hitl.washington.edu/artoolkit/Papers/IWAR99.kato.pdf},
year = {1999}
}
@incollection{Rosten2005,
abstract = {Augmented reality (AR) provides an intuitive user interface to present information in the context of the real world. A common application is to overlay screen-aligned annotations for real world objects to create in-situ information displays for users. While the referenced object’s location is fixed in the view the annotating labels should be placed in such a way as to not interfere with other content of interest such as other labels or objects in the real world. We present a new approach to determine and track areas with less visual interest based on feature density and to automatically compute label layout from this information. The algorithm works in under 5ms per frame, which is fast enough that it can be used with existing AR systems. Moreover, it provides flexible constraints for controlling label placement behaviour to the application designer. The resulting overlays are demonstrated with a simple hand-held augmented reality system for information display in a lab environment.},
author = {Rosten, Edward and Reitmayr, Gerhard and Drummond, Tom},
booktitle = {Advances in Visual Computing},
doi = {10.1007/11595755\_36},
editor = {Bebis, George and Boyle, Richard and Koracin, Darko and Parvin, Bahram},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Real-time Video Annotations for Augmented Reality - Rosten, Reitmayr, Drummond - 2005.pdf:pdf},
pages = {294--302},
publisher = {Springer Berlin / Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Real-time Video Annotations for Augmented Reality}},
volume = {3804},
year = {2005}
}
@article{Azuma2001,
abstract = {In 1997, Azuma published a survey on augmented reality (AR). Our goal is to complement, rather than replace, the original survey by presenting representative examples of the new advances. We refer one to the original survey for descriptions of potential applications (such as medical visualization, maintenance and repair of complex equipment, annotation, and path planning); summaries of AR system characteristics (such as the advantages and disadvantages of optical and video approaches to blending virtual and real, problems in display focus and contrast, and system portability); and an introduction to the crucial problem of registration, including sources of registration error and error-reduction strategies.},
author = {Azuma, Ronald T. and Baillot, Yohan and Behringer, Reinhold and Feiner, Steven K. and Julier, Simon and MacIntyre, Blair},
doi = {10.1109/38.963459},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Recent advances in augmented reality - Azuma et al. - 2001.pdf:pdf},
issn = {02721716},
journal = {IEEE Computer Graphics and Applications},
number = {6},
pages = {34--47},
title = {{Recent advances in augmented reality}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=963459},
volume = {21},
year = {2001}
}
@misc{Morris2003,
abstract = {This paper summarizes findings from the first phase of Intel’s Proactive Health research: uncovering needs brought on by cognitive impairment that can be addressed through home computing technologies. This project was motivated by the demographic shift of our aging population, the consequent health care crisis, and the particular obstacles that cognitive impairment poses to independent living. Although it is clear that the general needs of elders with cognitive impairments—for invisible, intuitive support and assessment—are well suited to innovative computing solutions, there are many unanswered questions about how these solutions can fit into everyday environments and routines. The ethnographic needs inquiry included expert interviews, focus groups, and contextualized interviews with cognitively impaired individuals and their informal care networks in 44 households in five U.S. regions. Emerging themes from this ethnographic research fall into four major categories: prevention and detection, managing everyday life, social connectedness, and identity affirmation. Technology opportunities that map onto these categories include embedded assessment, contextual prompting, social synchronization, remote wellness checking, and life span mapping. Concepts and demos that embody these solution capabilities are also described, along with directions for future research.},
author = {Morris, Margaret and Lundell, Jay},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Ubiquitous Computing for Cognitive Decline Findings from Intel's Proactive Health Research - Morris, Lundell - 2003.pdf:pdf},
pages = {11},
title = {{Ubiquitous Computing for Cognitive Decline: Findings from Intel's Proactive Health Research}},
url = {http://www.alz.org/national/documents/Intel\_UbiquitousComputing.pdf},
year = {2003}
}
@techreport{Acosta2009,
author = {Acosta, Daisy and Wortmann, Marc},
file = {:D$\backslash$:/\_Docs/mendeleyMain/World Alzheimer Report 2009 - Acosta, Wortmann - 2009.pdf:pdf},
institution = {Alzheimer's Disease International},
pages = {24},
title = {{World Alzheimer Report 2009}},
url = {http://www.alz.co.uk/research/files/WorldAlzheimerReport-ExecutiveSummary.pdf},
year = {2009}
}
@techreport{International1994,
abstract = {This document gives information on dementia in a simple way to families and the public at large. It presents useful and concrete information on how to cope with the disease, and on how to set up self help and mutual support activities for families with a member affected by dementia. It was produced with the cooperation of members of Alzheimer’s Disease International.},
address = {Geneva},
author = {{Alzheimer's Disease International}},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Help for caregivers - International - 1994.pdf:pdf},
institution = {World Health Organization},
issn = {1532-0650},
keywords = {Alzheimer’s disease,community support,dementia,family support groups,self help groups},
month = jun,
number = {11},
pages = {26},
title = {{Help for caregivers}},
url = {http://www.alz.co.uk/adi/pdf/helpforcaregivers.pdf},
volume = {83},
year = {1994}
}
@techreport{Wimo2010a,
author = {Wimo, Anders and Prince, Martin},
file = {:D$\backslash$:/\_Docs/mendeleyMain/World Alzheimer Report 2010 The Global Economic Impact of Dementia - Wimo, Prince - 2010.pdf:pdf},
institution = {Alzheimer's Disease International},
pages = {56},
title = {{World Alzheimer Report 2010: The Global Economic Impact of Dementia}},
url = {http://www.alz.co.uk/research/files/WorldAlzheimerReport2010.pdf},
year = {2010}
}
@inproceedings{OConaire2009,
abstract = {The SenseCam is a wearable camera that automatically takes photos of the wearer's activities, generating thousands of images per day. Automatically organising these images for efficient search and retrieval is a challenging task, but can be simplified by providing semantic information with each photo, such as the wearer's location during capture time. We propose a method for automatically determining the wearer's location using an annotated image database, described using SURF interest point descriptors. We show that SURF out-performs SIFT in matching SenseCam images and that matching can be done efficiently using hierarchical trees of SURF descriptors. Additionally, by re-ranking the top images using bi-directional SURF matches, location matching performance is improved further.},
author = {{\'{O} Conaire}, Ciar\'{a}n and Blighe, Michael and O'Connor, Noel E.},
booktitle = {MMM '09: Proceedings of the 15th International Multimedia Modeling Conference},
doi = {10.1007/978-3-540-92892-8\_4},
file = {:D$\backslash$:/\_Docs/mendeleyMain/SenseCam Image Localisation using Hierarchical SURF Trees - \'{O} Conaire, Blighe, O'Connor - 2009.pdf:pdf},
isbn = {978-3-540-92891-1},
keywords = {image matching,localisation,sensecam,surf},
pages = {15--26},
title = {{SenseCam Image Localisation using Hierarchical SURF Trees}},
url = {http://www.eeng.dcu.ie/~oconaire/papers/sensecam\_mmm09.pdf},
volume = {5371},
year = {2009}
}
@article{Bourgeois2003,
abstract = {The purpose of this study was to compare the effectiveness of two training approaches, Spaced Retrieval (SR) and a modified Cueing Hierarchy (CH), for teaching persons with dementia a strategy goal involving an external memory aid. Twenty-five persons with dementia living in either community or nursing home settings received training on two individual-specific strategy goals, one with each training approach. Results revealed that significantly more goals were attained using SR procedures than CH, but that a majority of participants learned to use external aids using both strategies. There were no significant differences in the number of sessions required to master goals in either condition; however, significantly more SR goals were maintained at both 1-week and 4-months post-training compared to CH goals. Mental status was not significantly correlated with goal mastery, suggesting the potential benefits of strategy training beyond the early stages of dementia.},
author = {Bourgeois, Michelle S. and Camp, Cameron and Rose, Miriam and White, Blanche and Malone, Megan and Carr, Jaime and Rovine, Michael},
doi = {10.1016/S0021-9924(03)00051-0},
file = {:D$\backslash$:/\_Docs/mendeleyMain/A comparison of training strategies to enhance use of external aids by persons with dementia - Bourgeois et al. - 2003.pdf:pdf},
issn = {00219924},
journal = {Journal of Communication Disorders},
month = oct,
number = {5},
pages = {361--378},
title = {{A comparison of training strategies to enhance use of external aids by persons with dementia}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0021992403000510},
volume = {36},
year = {2003}
}
@techreport{AlzheimersAssociation2007,
address = {Chicago, IL},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Alzheimer's Disease Facts and Figures 2007 - Unknown - 2007.pdf:pdf},
institution = {Alzheimer's Association},
pages = {30},
title = {{Alzheimer's Disease Facts and Figures 2007}},
year = {2007}
}
@techreport{Janatra2008,
abstract = {Image-based features are now commonly used to perform matching between objects in a query image and objects in database images. Previous studies have been done to apply these features in a real-time cell-phone-based mobile augmented reality (MAR) to recognize buildings. The database feature sets need to be transmitted from the database to the cell phone. In order to minimize cost associated to this transmission, a compression scheme should be developed. This document investigates the effectiveness of several possible compression algorithms. Initially, principal component analysis (PCA) is applied to maximize the coding gain. The transformed coefficients are quantized using a uniform quantizer and entropy-coded using several known entropy coding schemes. By varying the quantization level, the relationship between achievable bit rate, distortion, and query-to-database image matching rate could be determined.},
author = {Janatra, Ivan and Zhang, June},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Compression of Image-Based Features - Janatra, Zhang - 2008.pdf:pdf},
institution = {Stanford University},
keywords = {Data Compression,Image-based Features,Mobile Augmented Reality (MAR)},
pages = {5},
title = {{Compression of Image-Based Features}},
url = {http://www.stanford.edu/~dmchen/documents/project\_mentor\_ee398a\_ivan\_june\_report.pdf},
volume = {398},
year = {2008}
}
@misc{NIA2011,
file = {:D$\backslash$:/\_Docs/mendeleyMain/Home Safety for People with Alzheimer's Disease - Unknown - 2011.pdf:pdf},
pages = {44},
title = {{Home Safety for People with Alzheimer's Disease}},
url = {http://www.nia.nih.gov/NR/rdonlyres/A86CA4FA-CAA9-4E8A-8B38-F5887EFABF2B/0/HomeSafetyupdateFINAL32310.pdf},
year = {2011}
}
@unpublished{Chen2008,
abstract = {Automatic recognition of objects in images now typically relies on robust local image features. For scalable search through a large database, image features are quantized using a scalable vocabulary tree (SVT) which forms a large visual dictionary. In this project, we design support vector machine (SVM) classifiers for tree histograms calculated from SVT quantization. We explore several practical kernels that naturally capture the statistics of image features. A baseline Naive Bayes classifier for tree histograms is also created for comparison. After Naive Bayes or SVM classification, we further perform a geometric verification step to avoid false positive matches, using either affine or scale consistency check. The Naive Bayes and SVM classifiers and the geometric verification algorithms are tested on two real image databases with challenging image distortions.},
author = {Chen, David and Makar, Mina and Tsai, Shang-Hsuan},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Scalable Object Recognition Using Support Vector Machines - Chen, Makar, Tsai - 2008.pdf:pdf},
institution = {Stanford University},
note = {Unpublished course project report, Stanford University},
pages = {5},
title = {{Scalable Object Recognition Using Support Vector Machines}},
url = {http://www-leland.stanford.edu/class/cs229/proj2008/ChenMakarTsai-ScalableObjectRecognitionUsingSupportVectorMachines.pdf},
year = {2008}
}
@article{Clare2004,
author = {Clare, Linda and Woods, Robert},
doi = {10.1080/09602010443000074},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Cognitive training and cognitive rehabilitation for people with early-stage Alzheimer's disease A review - Clare, Woods - 2004.pdf:pdf},
issn = {0960-2011},
journal = {Neuropsychological Rehabilitation},
month = sep,
number = {4},
pages = {385--401},
title = {{Cognitive training and cognitive rehabilitation for people with early-stage Alzheimer's disease: A review}},
url = {http://www.informaworld.com/openurl?genre=article\&doi=10.1080/09602010443000074\&magic=crossref||D404A21C5BB053405B1A640AFFD44AE3},
volume = {14},
year = {2004}
}
@inproceedings{Botterill2008,
abstract = {This paper describes a new scalable scheme for the real-time detection of identical scenes for mobile robot localisation, allowing fast retraining to learn new environments. It uses the image bag-of-words algorithm, where images are described by a set of local feature descriptors mapped to a discrete set of dasiaimage wordspsila. This scheme uses descriptors consisting of a combination of a descriptor of shape (SURF) and a hue histogram, and this combination is shown to perform better than either descriptor alone. K-medoids clustering is shown to be suitable for quantising these composite descriptors (or any arbitrary descriptor) into visual words. The scheme can identify in real-time (0.036 seconds per query) multiple images of the same object from a standard dataset of 10200 images, showing robustness to differences in perspective and changes in the scene, and can detect loops in a video stream from a mobile robot.},
address = {Christchurch},
author = {Botterill, Tom and Mills, Steven and Green, Richard},
booktitle = {IVCNZ '08: Proceedings of the 23rd International Conference Image and Vision Computing New Zealand},
doi = {10.1109/IVCNZ.2008.4762067},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Speeded-up Bag-of-Words algorithm for robot localisation through scene recognition - Botterill, Mills, Green - 2008.pdf:pdf},
isbn = {978-1-4244-2582-2},
keywords = {bag-of-words,image search,scene recognition,visual navigation},
month = nov,
pages = {1--6},
publisher = {IEEE Computer Society},
title = {{Speeded-up Bag-of-Words algorithm for robot localisation through scene recognition}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4762067},
year = {2008}
}
@article{Behrens2010,
abstract = {In many algorithms the registration of image pairs is done by feature point matching. After the feature detection is performed, all extracted interest points are usually used for the registration process without further feature point distribution analysis. However, in the case of small and sparse sets of feature points of fixed size, suitable for real-time image mosaicking algorithms, a uniform spatial feature distribution across the image becomes relevant. Thus, in this paper we discuss and analyze algorithms which provide different spatial point distributions from a given set of SURF features. The evaluations show that a more uniform spatial distribution of the point matches results in lower image registration errors, and is thus more beneficial for fast image mosaicking algorithms.},
author = {Behrens, Alexander and R\"{o}llinger, Hendrik},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Analysis of Feature Point Distributions for Fast Image Mosaicking Algorithms Mosaicking algorithm - Behrens, R\"{o}llinger - 2010.pdf:pdf},
journal = {Acta Polytechnica Journal of Advanced Engineering},
keywords = {feature detection,image mosaicking,image registration,point distribution},
number = {4},
pages = {12--18},
title = {{Analysis of Feature Point Distributions for Fast Image Mosaicking Algorithms}},
url = {http://www.lfb.rwth-aachen.de/files/publications/2010/BEH10g.pdf},
volume = {50},
year = {2010}
}
@article{Beard2009,
abstract = {Rhetoric referring to Alzheimer's disease as ‘the never ending funeral’ or ‘a slow unraveling of the self’ implies that diagnosed individuals and their families alike are victims of a dreaded disease. Data gathered from web-based surveys with twenty-seven individuals with dementia demonstrate howsome persons living with the condition actively negotiate their everyday lives to counter such pejorative assumptions. Grounded theory methods were used to consolidate textual data into overarching themes. Findings depict persons with dementia who do not experience an inherent ‘loss of self’ but rather consciously strive to incorporate a ‘manageable disability’ into their existing identities. Respondents give numerous examples of how they can and do live with dementia. These data portray an empowered identity that suggests the need for a reframing of dementia to challenge the normative victim-orientation and the social disadvantages of such biomedical reductionism.},
author = {Beard, Ren\'{e}e L. and Knauss, Jenny and Moyer, Don},
doi = {10.1016/j.jaging.2008.01.002},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Managing disability and enjoying life How we reframe dementia through personal narratives - Beard, Knauss, Moyer - 2009.pdf:pdf},
issn = {08904065},
journal = {Journal of Aging Studies},
keywords = {Alzheimer's,Dementia,Empowered identities,Enrichment,Narrative},
month = dec,
number = {4},
pages = {227--235},
publisher = {Elsevier Inc.},
title = {{Managing disability and enjoying life: How we reframe dementia through personal narratives}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0890406508001412},
volume = {23},
year = {2009}
}
@unpublished{Chu2007,
abstract = {In this paper, a feature-detecting algorithm to identify paintings from mobile phone camera images is presented. The algorithm is capable of identifying paintings from noisy camera-phone images. Even with typical blurring, rotation, and a small amount of perspective distortion that would occur in a controlled museum setting, by training on a set of standard painting images, the paintings can be correctly identified. With calculation-intensive portions of the code in C, the algorithm runs efficiently as well as accurately. All 99 test images were detected correctly, with an average runtime of 0.9228 seconds on SCIEN machines. I.},
author = {Chu, Eric and Hsu, Erin and Yu, Sandy},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Image-Guided Tours Fast-Approximated SIFT with U-SURF Freatures - Chu, Hsu, Yu - 2007.pdf:pdf},
institution = {Stanford University},
note = {Unpublished course project report, Stanford University},
pages = {7},
title = {{Image-Guided Tours: Fast-Approximated SIFT with U-SURF Features}},
url = {http://www.stanford.edu/class/ee368/Project\_07/reports/ee368group05.pdf},
year = {2007}
}
@inproceedings{Brown2002,
abstract = {This paper approaches the problem of finding correspondences between images in which there are large changes in viewpoint, scale and illumination. Recent work has shown that scale-space 'interest points' may be found with good repeatability in spite of such changes. Furthermore, the high entropy of the surrounding image regions means that local descriptors are highly discriminative for matching. For descriptors at interest points to be robustly matched between images, they must be as far as possible invariant to the imaging process.},
author = {Brown, Matthew and Lowe, David G.},
booktitle = {BMVC '02: Proceedings of The 13th British Machine Vision Conference},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Invariant Features from Interest Point Groups - Brown, Lowe - 2002.pdf:pdf},
pages = {656--665},
title = {{Invariant Features from Interest Point Groups}},
url = {http://www.cs.ubc.ca/~lowe/papers/brown02.pdf},
year = {2002}
}
@inproceedings{Nister2006,
abstract = {A recognition scheme that scales efficiently to a large number of objects is presented. The efficiency and quality is exhibited in a live demonstration that recognizes CD-covers from a database of 40000 images of popular music CD’s. The scheme builds upon popular techniques of indexing descriptors extracted from local regions, and is robust to background clutter and occlusion. The local region descriptors are hierarchically quantized in a vocabulary tree. The vocabulary tree allows a larger and more discriminatory vocabulary to be used efficiently, which we show experimentally leads to a dramatic improvement in retrieval quality. The most significant property of the scheme is that the tree directly defines the quantization. The quantization and the indexing are therefore fully integrated, essentially being one and the same. The recognition quality is evaluated through retrieval on a database with ground truth, showing the power of the vocabulary tree approach, going as high as 1 million images.},
author = {Nist\'{e}r, David and Stew\'{e}nius, Henrik},
booktitle = {CVPR '06: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
doi = {10.1109/CVPR.2006.264},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Scalable Recognition with a Vocabulary Tree - Nist\'{e}r, Stew\'{e}nius - 2006.pdf:pdf},
isbn = {0-7695-2597-0},
pages = {2161--2168},
publisher = {IEEE},
title = {{Scalable Recognition with a Vocabulary Tree}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1641018},
year = {2006}
}
@article{Velmurugan2011,
abstract = {Content-Based Image Retrieval (CBIR) is a challenging task which retrieves the similar images from the large database. Most of the CBIR system uses the low-level features such as colour, texture and shape to extract the features from the images. In recent years the interest points are used to extract features that are invariant to image transformations and to improve the retrieval accuracy. SURF is a fast and robust interest point detector/descriptor which is used in many computer vision applications. To improve the performance of the system the SURF is combined with Colour Moments since SURF works only on gray scale images. The KD-tree with the Best Bin First (BBF) search algorithm is used to index and match the similarity between the features of the images. Finally, Voting Scheme algorithm is used to rank and retrieve the matched images from the database.},
author = {Velmurugan, K. and Baboo, S. Santhosh},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Content-Based Image Retrieval using SURF and Colour Moments - Velmurugan, Baboo - 2011.pdf:pdf},
journal = {Global Journal of Computer Science and Technology},
keywords = {SURF,colour moments,content-based image retrieval (CBIR),kd tree},
number = {10},
pages = {1--4},
title = {{Content-Based Image Retrieval using SURF and Colour Moments}},
url = {http://globaljournals.org/GJCST\_Volume11/1-Content-Based-Image-Retrieval-using-SURF.pdf},
volume = {11},
year = {2011}
}
@inproceedings{Sample2001,
abstract = {K-d trees have been widely studied, yet their complete advantages are often not realized due to ineffective search implementations and degrading performance in high dimensional spaces. We outline an effective search algorithm for k-d trees that combines an optimal depth-first branch and bound (DFBB) strategy with a unique method for path ordering and pruning. This technique was developed for improving nearest neighbor (NN) search, but has also proven effective for k-NN and approximate k-NN queries.},
author = {Sample, Neal and Haines, Matthew and Arnold, Mark and Purcell, Timothy},
booktitle = {CSCC '01: Proceedings of the 5th WSES/IEEE World Multiconference on Circuits, Systems, Communications \& Computers},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Optimizing Search Strategies in k-d Trees - Sample et al. - 2001.pdf:pdf},
keywords = {DFBB,high dimensionality,k-NN,k-d trees,nearest neighbor,search},
title = {{Optimizing Search Strategies in k-d Trees}},
url = {http://graphics.stanford.edu/~tpurcell/pubs/search.pdf},
year = {2001}
}
@article{Quittre2005,
abstract = {A dedicated training program for teaching a patient with Alzheimer's disease to independently use an agenda is presented. This training capitalises on preserved cognitive abilities and incorporates principles from learning theories. This case study reports the effective use of a memory book for daily life activities and of a digital clock for time reorientation.},
author = {Quittre, Anne and Oliver, Catherine and Salmon, Eric},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Compensating strategies for impaired episodic memory and time orientation in a patient with Alzheimer's disease - Quittre, Oliver, Salmon - 2005.pdf:pdf},
journal = {Acta Neurologica Belgica},
keywords = {alzheimer's disease,cognitive rehabilitation,memory deficit,prosthetic aid},
number = {1},
pages = {30--38},
title = {{Compensating strategies for impaired episodic memory and time orientation in a patient with Alzheimer's disease}},
url = {http://www.actaneurologica.be/acta/download/2005-1/05-Quittre.pdf},
volume = {105},
year = {2005}
}
@inproceedings{Huang2010,
abstract = {This paper describes a method to stabilize video for vehicular applications based on feature analysis. An investigation on camera motion model is conducted. Harris features are extracted under the proposed resolution adaptation scheme. Besides, features are described with SURF-like descriptor. For feature matching, KD-tree with best-bin-first search significantly reduces the matching time. A damping filter is utilized to model and predict the unwanted oscillation. 93.1\% correct rate in average is achieved in divergent driving conditions. Only 0.114 second is required to process a frame at resolution $1280 \times 960$. The provided benchmark shows outperformance of the proposed method.},
address = {Hong Kong, Hong Kong},
author = {Huang, Keng-Yen and Tsai, Yi-Min and Tsai, Chih-Chung and Chen, Liang-Gee},
booktitle = {ICIP '10: Proceedings of the 17th IEEE International Conference on Image Processing},
doi = {10.1109/ICIP.2010.5653052},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Video stabilization for vehicular applications using SURF-like descriptor and KD-tree - Huang et al. - 2010.pdf:pdf},
isbn = {978-1-4244-7992-4},
keywords = {KD-tree,Video stabilization,and feature extraction,damping filter,motion analysis},
month = sep,
number = {5},
pages = {3517--3520},
publisher = {IEEE Computer Society},
title = {{Video stabilization for vehicular applications using SURF-like descriptor and KD-tree}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5653052},
year = {2010}
}
@inproceedings{Szymkowiak2004,
abstract = {Independence is a defining attribute of being an adult. We guard and preserve our right to determine how and where we live our lives, and the routines that we maintain. These rights are fundamental to the ‘independent living’ and ‘care in the community’ movements of recent years. Loss of this independence is not only demoralising for all the people directly and indirectly involved, but also increases care costs to the individuals, their families, and to the wider community. Conversely, if independence can be maintained, a reduction of the workload of professional carers and reduced health care costs is possible. One condition that can severely affect the degree of independence is the extent to which people can organise, plan and, crucially, remember to carry out even simple everyday tasks. Memory dysfunction can hamper the quality of everyday lives of memory-impaired individuals, leaving them extremely dependent on carers (Wilson et al., 2003). In this paper we describe issues in the development of a memory aid system with remote communication to re-create independence for memory-impaired, elderly people and present data from an evaluation of this system.},
address = {Cambridge, UK},
author = {Szymkowiak, A. and Morrison, Kenny and Shah, P. and Gregor, Peter and Newell, A. and Wilson, Barbara A. and Schofield, S.},
booktitle = {CWUAAT '04: Proceedings of the 2nd Cambridge Workshop on Universal Access and Assistive Technology},
note = {CiteSeerX: 10.1.1.106.7700},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Memojog – An Interactive Memory Aid with Remote Communication - Szymkowiak et al. - 2004.pdf:pdf},
pages = {15--24},
title = {{Memojog -- An Interactive Memory Aid with Remote Communication}},
url = {http://rehab-www.eng.cam.ac.uk/cwuaat/04/03-memojog-cwuaat04.pdf},
year = {2004}
}
@inproceedings{Morrison2004,
abstract = {Memory problems are often associated with ageing and are among the most common effects of brain injury. Such problems can severely disrupt daily life and put huge strain on family members and carers. Electronic devices have been used successfully to provide short and timely reminders to memory-impaired individuals. The Memojog project has developed and evaluated a mobile, interactive communication and memory-aid system with elderly and memory-impaired people. The system utilizes current and easily available technology such as the internet and GPRS mobile telephony. This paper will look at the design as well as the successes and limitations of the Memojog system.},
address = {Glasgow, UK},
author = {Morrison, Kenny and Szymkowiak, Andrea and Gregor, Peter},
booktitle = {MobileHCI '04: Proceedings of the 6th International Conference on Mobile Human-Computer Interaction},
doi = {10.1007/978-3-540-28637-0_61},
editor = {Brewster, Stephen and Dunlop, Mark},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Memojog – An Interactive Memory Aid Incorporating Mobile Based Technologies - Morrison, Szymkowiak, Gregor - 2004.pdf:pdf},
pages = {59--73},
publisher = {Springer Berlin / Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Memojog -- An Interactive Memory Aid Incorporating Mobile Based Technologies}},
url = {http://www.springerlink.com/content/6n06xxf681effm22/fulltext.pdf},
volume = {3160},
year = {2004}
}
@article{Herrmann1996,
abstract = {An intention requires us to carry out an action at a certain time or in an unscheduled time frame. In recent years, palmtop computerized devices with special functions have been developed that enable individuals to better remember their intentions. People record their intention in the device and are later reminded of their intention by a warning signal, such as an audible beep, that is presented along with a message about what is to be done. The present research investigated the psychological effects of the warning signals provided by palmtop reminding devices. Four experiments demonstrated that the effectiveness of an audible warning signal in the form of a signal was greatest early in the day. The interval between the signal and time to carry out the act, called here the anticipatory lag, did not significantly influence the timeliness of responses and remembering.},
author = {Herrmann, Douglas and Sheets, Virgil and Wells, Justine and Yoder, Carol},
doi = {10.1007/BF01174604},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Palmtop Computerized Reminding Devices The Effectiveness of the Temporal Properties of Warning Signals - Herrmann et al. - 1996.pdf:pdf},
issn = {0951-5666},
journal = {AI \& Society},
keywords = {externalization of the mind,palmtop reminding devices,personal data assistant,remembering intentions,signaling intentions,signals,temporal variables},
month = sep,
number = {3-4},
pages = {289--302},
title = {{Palmtop Computerized Reminding Devices: The Effectiveness of the Temporal Properties of Warning Signals}},
url = {http://www.springerlink.com/index/10.1007/BF01174604},
volume = {10},
year = {1996}
}
@inproceedings{Takacs2007,
abstract = {We propose a novel, low-complexity, tracking scheme that uses motion vectors directly from a video coder. We compare our tracking algorithm against ground truth data, and show that we can achieve a high level of accuracy, even though the motion vectors are rate-distortion optimized and do not represent true motion. We develop a framework for tracking in video sequences with various GOP structures. Such a scheme would find applications in the context of Mobile Augmented Reality. The proposed feature tracking algorithm can significantly reduce the required rate of feature extraction and matching.},
address = {Washington, DC},
author = {Takacs, Gabriel and Chandrasekhar, Vijay and Girod, Bernd and Grzeszczuk, Radek},
booktitle = {ISMAR '07: Proceedings of the 6th IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2007.4538838},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Feature Tracking for Mobile Augmented Reality Using Video Coder Motion Vectors - Takacs et al. - 2007.pdf:pdf},
isbn = {978-1-4244-1749-0},
month = nov,
pages = {1--4},
publisher = {IEEE Computer Society},
title = {{Feature Tracking for Mobile Augmented Reality Using Video Coder Motion Vectors}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4538838},
year = {2007}
}
@inproceedings{Rukzio2009,
abstract = {Important drawbacks of map-based navigation applications for mobile phones are their small screen size and that users have to associate the information provided by the mobile phone with the real world. Therefore, we designed, implemented and evaluated the Rotating Compass -- a novel public display for pedestrian navigation. Here, a floor display continuously shows different directions (in a clockwise order) and the mobile phone informs the user when their desired direction is indicated. To inform the user, the mobile phone vibrates in synchronization with the indicated direction. We report an outdoor study that compares a conventional navigation application running on a mobile phone, a paper map, a navigation device, navigation information provided by a public display, and the Rotating Compass. The results provide clear evidence of the advantages of the new interaction technique when considering task completion time, context switches, disorientation events, usability satisfaction, workload and multi-user support.},
address = {Boston, MA},
author = {Rukzio, Enrico and M\"{u}ller, Michael and Hardy, Robert},
booktitle = {CHI '09: Proceedings of the 28th ACM Conference on Human Factors in Computing Systems},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Design, Implementation and Evaluation of a Novel Public Display for Pedestrian Navigation The Rotating Compass - Rukzio, M\"{u}ller, Hardy - 2009.pdf:pdf},
pages = {113--122},
publisher = {ACM Press},
title = {{Design, Implementation and Evaluation of a Novel Public Display for Pedestrian Navigation: The Rotating Compass}},
year = {2009}
}
@inproceedings{Valgren,
abstract = {Local feature matching has become a commonly used method to compare images. For mobile robots, a reliable method for comparing images can constitute a key component for localization and loop closing tasks. In this paper, we address the issues of outdoor appearance-based topological localization for a mobile robot over time. Our data sets, each consisting of a large number of panoramic images, have been acquired over a period of nine months with large seasonal changes (snow- covered ground, bare trees, autumn leaves, dense foliage, etc.). Two different types of image feature algorithms, SIFT and the more recent SURF, have been used to compare the images. We show that two variants of SURF, called U-SURF and SURF-128, outperform the other algorithms in terms of accuracy and speed.},
author = {Valgren, Christoffer and Lilienthal, Achim},
booktitle = {ECMR '07: Proceedings of the European Conference on Mobile Robots},
file = {:D$\backslash$:/\_Docs/mendeleyMain/SIFT, SURF and Seasons Long-term Outdoor Localization Using Local Features - Valgren, Lilienthal - 2007.pdf:pdf},
pages = {1--6},
publisher = {Citeseer},
title = {{SIFT, SURF and Seasons: Long-term Outdoor Localization Using Local Features}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.84.2497\&rep=rep1\&type=pdf},
year = {2007}
}
@article{Luo2009,
abstract = {This paper compares three robust feature detection methods, they are, Scale Invariant Feature Transform (SIFT), Principal Component Analysis (PCA) -SIFT and Speeded Up Robust Features (SURF). Lowe presented SIFT [1], which was successfully used in recognition, stitching and many other applications because of its robustness. Yan Ke [2] gave a change of SIFT by using PCA to normalize the gradient patch instead of histogram. H. Bay [3] presented a faster method for SURF, which used Fast-Hessian detector. The performance of the three methods is compared for scale changes, rotation , blur, illumination changes and affine transformations, all of which uses repeatability as an evaluation measurement. Additionally, RANSAC is used to reject the inconsistent matches [4]. SIFT presents its stability in most situation except rotation and illumination changes. SURF is the fastest one with good performance as the same as SIFT, PCA-SIFT shows its advantages in rotation, blur and illumination changes.},
author = {Luo, Juan and Oubong, Gwun},
file = {:D$\backslash$:/\_Docs/mendeleyMain/A Comparison of SIFT, PCA-SIFT and SURF - Luo, Oubong - 2009.pdf:pdf},
journal = {International Journal of Image Processing (IJIP)},
keywords = {pca-sift,robust detectors,sift,surf},
number = {4},
pages = {143--152},
title = {{A Comparison of SIFT, PCA-SIFT and SURF}},
url = {http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue4/IJIP-51.pdf},
volume = {3},
year = {2009}
}
@techreport{Furgale2009,
author = {Furgale, Paul and Tong, Chi Hay and Kenway, Gaetan},
file = {:D$\backslash$:/\_Docs/mendeleyMain/ECE1724 Project Speeded-Up Speeded-Up Robust Features - Furgale, Tong, Kenway - 2009.pdf:pdf},
institution = {University of Toronto},
pages = {18},
title = {{ECE1724 Project Speeded-Up Speeded-Up Robust Features}},
url = {http://asrl.utias.utoronto.ca/~ptf/docs/gpusurf\_report09.pdf},
year = {2009}
}
@article{BAY2008,
abstract = {This article presents a novel scale- and rotation-invariant detector and descriptor, coined SURF (Speeded-Up Robust Features). SURF approximates or even outperforms previously proposed schemes with respect to repeatability, distinctiveness, and robustness, yet can be computed and compared much faster. This is achieved by relying on integral images for image convolutions; by building on the strengths of the leading existing detectors and descriptors (specifically, using a Hessian matrix-based measure for the detector, and a distribution-based descriptor); and by simplifying these methods to the essential. This leads to a combination of novel detection, description, and matching steps. The paper encompasses a detailed description of the detector and descriptor and then explores the effect of the most important parameters. We conclude the article with SURF’s application to two challenging, yet converse goals: camera calibration as a special case of image registration, and object recognition. Our experiments underline SURF’s usefulness in a broad range of topics in computer vision.},
author = {Bay, Herbert and Ess, Andreas and Tuytelaars, Tinne and {Van Gool}, Luc},
doi = {10.1016/j.cviu.2007.09.014},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Speeded-Up Robust Features (SURF) - Bay et al. - 2008.PDF:PDF},
issn = {10773142},
journal = {Computer Vision and Image Understanding},
keywords = {camera calibration,feature description,interest points,local features,object recognition},
month = jun,
number = {3},
pages = {346--359},
title = {{Speeded-Up Robust Features (SURF)}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1077314207001555},
volume = {110},
year = {2008}
}
@inproceedings{Jegou2009,
abstract = {One of the main limitations of image search based on bag-of-features is the memory usage per image. Only a few million images can be handled on a single machine in reasonable response time. In this paper, we first evaluate how the memory usage is reduced by using lossless index compression. We then propose an approximate representation of bag-of-features obtained by projecting the corresponding histogram onto a set of pre-defined sparse projection functions, producing several image descriptors. Coupled with a proper indexing structure, an image is represented by a few hundred bytes. A distance expectation criterion is then used to rank the images. Our method is at least one order of magnitude faster than standard bag-of-features while providing excellent search quality.},
address = {Kyoto},
author = {J\'{e}gou, Herv\'{e} and Douze, Matthijs and Schmid, Cordelia},
booktitle = {ICCV '09: Proceedings of the 12th IEEE International Conference on Computer Vision},
doi = {10.1109/ICCV.2009.5459419},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Packing bag-of-features - J\'{e}gou, Douze, Schmid - 2009.PDF:PDF},
isbn = {978-1-4244-4420-5},
month = sep,
pages = {2357--2364},
publisher = {IEEE Computer Society},
title = {{Packing bag-of-features}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5459419},
year = {2009}
}
@phdthesis{Gassmann2010,
author = {Gassmann, Alexander},
file = {:D$\backslash$:/\_Docs/mendeleyMain/GuidAce - Augmented Reality on Android - Gassmann - 2010.pdf:pdf},
pages = {53},
school = {ETH Z\"{u}rich},
title = {{GuidAce - Augmented Reality on Android}},
url = {http://www.vision.ee.ethz.ch/teaching/sada/sadalink/reports/biwi\_00339.pdf},
year = {2010}
}
@inproceedings{Ta2009,
abstract = {We present an efficient algorithm for continuous image recognition and feature descriptor tracking in video which operates by reducing the search space of possible interest points inside of the scale space image pyramid. Instead of performing tracking in 2D images, we search and match candidate features in local neighborhoods inside the 3D image pyramid without computing their feature descriptors. The candidates are further validated by fitting to a motion model. The resulting tracked interest points are more repeatable and resilient to noise, and descriptor computation becomes much more efficient because only those areas of the image pyramid that contain features are searched. We demonstrate our method on real-time object recognition and label augmentation running on a mobile device.},
author = {Ta, Duy-Nguyen and Chen, Wei-Chao and Gelfand, Natasha and Pulli, Kari},
booktitle = {CVPR '09: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
doi = {10.1109/CVPR.2009.5206831},
file = {:D$\backslash$:/\_Docs/mendeleyMain/SURFTrac Efficient tracking and continuous object recognition using local feature descriptors - Ta et al. - 2009.pdf:pdf},
isbn = {978-1-4244-3992-8},
month = jun,
pages = {2937--2944},
publisher = {IEEE Computer Society},
title = {{SURFTrac: Efficient tracking and continuous object recognition using local feature descriptors}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5206831},
year = {2009}
}
@inproceedings{Gammeter2010,
abstract = {In this paper we present a system for mobile augmented reality (AR) based on visual recognition. We split the tasks of recognizing an object and tracking it on the user's screen into a server-side and a client-side task, respectively. The capabilities of this hybrid client-server approach are demonstrated with a prototype application on the Android platform, which is able to augment both stationary (landmarks) and non stationary (media covers) objects. The database on the server side consists of hundreds of thousands of landmarks, which is crawled using a state of the art mining method for community photo collections. In addition to the landmark images, we also integrate a database of media covers with millions of items. Retrieval from these databases is done using vocabularies of local visual features. In order to fulfill the real-time constraints for AR applications, we introduce a method to speed-up geometric verification of feature matches. The client-side tracking of recognized objects builds on a multi-modal combination of visual features and sensor measurements. Here, we also introduce a motion estimation method, which is more efficient and precise than similar approaches. To the best of our knowledge this is the first system, which demonstrates a complete pipeline for augmented reality on mobile devices with visual object recognition scaled to millions of objects combined with real-time object tracking.},
address = {San Francisco, CA},
author = {Gammeter, Stephan and Gassmann, Alexander and Bossard, Lukas and Quack, Till and {Van Gool}, Luc},
booktitle = {CVPRW '10: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops},
doi = {10.1109/CVPRW.2010.5543248},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Server-side object recognition and client-side object tracking for mobile augmented reality - Gammeter et al. - 2010.pdf:pdf},
isbn = {978-1-4244-7029-7},
month = jun,
pages = {1--8},
publisher = {IEEE},
title = {{Server-side object recognition and client-side object tracking for mobile augmented reality}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5543248},
year = {2010}
}
@inproceedings{Wagner2008,
abstract = {In this paper we present two techniques for natural feature tracking in real-time on mobile phones. We achieve interactive frame rates of up to 20Hz for natural feature tracking from textured planar targets on current-generation phones. We use an approach based on heavily modified state-of-the-art feature descriptors, namely SIFT and Ferns. While SIFT is known to be a strong, but computationally expensive feature descriptor, Ferns classification is fast, but requires large amounts of memory. This renders both original designs unsuitable for mobile phones. We give detailed descriptions on how we modified both approaches to make them suitable for mobile phones. We present evaluations on robustness and performance on various devices and finally discuss their appropriateness for Augmented Reality applications.},
author = {Wagner, Daniel and Reitmayr, Gerhard and Mulloni, Alessandro and Drummond, Tom and Schmalstieg, Dieter},
booktitle = {ISMAR '08: Proceedings of the 7th IEEE and ACM International Symposium on Mixed and Augmented Reality},
doi = {10.1109/ISMAR.2008.4637338},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Pose tracking from natural features on mobile phones - Wagner et al. - 2008.pdf:pdf},
isbn = {978-1-4244-2840-3},
keywords = {mobile phones,natural features,pose tracking},
month = sep,
pages = {125--134},
publisher = {IEEE Computer Society},
title = {{Pose tracking from natural features on mobile phones}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4637338},
year = {2008}
}
@phdthesis{Sommer2010,
abstract = {With the soaring number of transistors per chip, graphic processing units have developed from coprocessors specialized in 3D graphics into fully programmable parallel processors providing superior floating point performance and memory bandwidth for data-parallel computations. This type of data processing is commonly found in computer vision algorithms that process images and thus are predestined for a parallel implementation running on a GPU which offers a significant speed-up without requiring additional or expensive hardware. In this thesis an OpenCL implementation of the popular SURF algorithm for interest point detection and matching is presented. After a comprehensive review of the algorithm’s details and underlying theory, a closer look is taken at the interface provided by the OpenCL framework and the specifics of the GPU. These sections are followed by a description of the kernel code running on the parallel processor and the supporting framework that facilitates development and testing. Finally the runtime and detection/matching performance of the proposed implementation is evaluated based on a set of standard image sequences and a reference implementation running on the CPU.},
author = {Sommer, Torsten},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Interest Point Detection and Matching on SIMD Architectures - Sommer - 2010.pdf:pdf},
pages = {82},
school = {Technische Universit\"{a}t M\"{u}nchen},
title = {{Interest Point Detection and Matching on SIMD Architectures}},
year = {2010}
}
@article{Berry2007,
abstract = {This case study describes the use of a wearable camera, SenseCam, which automatically captures several hundred images per day, to aid autobiographical memory in a patient, Mrs B, with severe memory impairment following limbic encephalitis. By using SenseCam to record personally experienced events we intended that SenseCam pictures would form a pictorial diary to cue and consolidate autobiographical memories. After wearing SenseCam, Mrs B plugged the camera into a PC which uploaded the recorded images and allowed them to be viewed at speed, like watching a movie. In the control condition, a written diary was used to record and remind her of autobiographical events. After viewing SenseCam images, Mrs B was able to recall approximately 80\% of recent, personally experienced events. Retention of events was maintained in the long-term, 11 months afterwards, and without viewing SenseCam images for three months. After using the written diary, Mrs B was able to remember around 49\% of an event; after one month with no diary readings she had no recall of the same events. We suggest that factors relating to rehearsal/re-consolidation may have enabled SenseCam images to improve Mrs B's autobiographical recollection.},
author = {Berry, Emma and Kapur, Narinder and Williams, Lyndsay and Hodges, Steve and Watson, Peter and Smyth, Gavin and Srinivasan, James and Smith, Reg and Wilson, Barbara A. and Wood, Ken},
doi = {10.1080/09602010601029780},
file = {:D$\backslash$:/\_Docs/mendeleyMain/The use of a wearable camera, SenseCam, as a pictorial diary to improve autobiographical memory in a patient with limbic encephalitis a preliminary report. - Berry et al. - 2007.pdf:pdf},
issn = {0960-2011},
journal = {Neuropsychological Rehabilitation},
keywords = {Autobiography as Topic,Encephalitis,Encephalitis: complications,Encephalitis: pathology,Female,Humans,Limbic System,Limbic System: pathology,Memory Disorders,Memory Disorders: etiology,Memory Disorders: rehabilitation,Mental Recall,Mental Recall: physiology,Middle Aged,Neuropsychological Tests,Self Concept},
number = {4-5},
pages = {582--601},
pmid = {17676536},
title = {{The use of a wearable camera, SenseCam, as a pictorial diary to improve autobiographical memory in a patient with limbic encephalitis: a preliminary report.}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/17676536},
volume = {17},
year = {2007}
}
@techreport{Bush2006,
address = {M\'{e}xico, D.F.},
author = {Partida Bush, Virgilio},
institution = {CONAPO},
pages = {29},
title = {{Proyecciones de la poblaci\'{o}n de M\'{e}xico 2005--2050}},
year = {2006}
}
@article{Rekimoto1997,
abstract = {A way to realize augmented reality (AR) is proposed that can be used in an open environment by introducing the concept of identification (ID) awareness and a hand-held video see-through display. Unlike other AR systems that use head-mounted or head-up displays, our approach employs the combination of a palmtop-sized display and a small video camera. A user sees the real world through the display device, with added computer-augmented information. In this configuration, the 'magnifying-glass' approach, the user is not required to wear any cumbersome headgear and can easily move the display device around like a magnifying glass to compare real and augmented images. The video camera also obtains information related to real-world situations. The system recognizes real-world objects using the video images by reading ID tags. Based on the recognized tag, the system retrieves and displays information about the real-world object to the user. We describe several potential applications.},
author = {Rekimoto, Jun},
journal = {Presence: Teleoperators and Virtual Environments},
number = {4},
pages = {399--412},
title = {{NaviCam: A Magnifying Glass Approach to Augmented Reality}},
volume = {6},
year = {1997}
}
@article{Wilson2003,
abstract = {NeuroPage, a paging service designed to reduce the everyday memory and/or planning problems of people with neurological deficits, is described. Following several research studies carried out to evaluate the system, a British local health authority set up a nationwide commercial NeuroPage service. A report on the first 40 clients recruited to the service is provided. The age range was 14-81 years, the majority of clients were men and the most frequent diagnosis was traumatic brain injury although a number of different diagnostic groups were represented. The types of messages sent and the group's satisfaction with NeuroPage are considered. Three brief case studies to illustrate the different ways clients used the system are presented.},
author = {Wilson, Barbara A. and Scott, Helena and Evans, Jonathan and Emslie, Hazel},
journal = {NeuroRehabilitation},
keywords = {brain injury,external aids,memory,rehabilitation},
number = {1},
pages = {3--8},
title = {{Preliminary Report of a NeuroPage Service Within the Health Care System}},
volume = {18},
year = {2003}
}
@incollection{Wilson1984,
address = {London, UK},
author = {Wilson, Barbara A. and Moffat, N.},
booktitle = {Everyday Memory, Actions and Absent-Mindedness},
editor = {Harris, John E. and Morris, Peter Edwin},
pages = {207--233},
publisher = {Academic Press},
title = {{Rehabilitation of Memory for Everyday Life}},
year = {1984}
}
@book{Fearnley1996,
author = {Fearnley, Kate and {HEBS}},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Coping with Dementia A Handbook for Carers - Fearnley, HEBS - 1996.pdf:pdf},
isbn = {9781844853366},
publisher = {Health Education Board for Scotland},
title = {{Coping with Dementia: A Handbook for Carers}},
year = {1996}
}
@article{Huppert2000,
  author   = {Huppert, Felicia A. and Johnson, Tony and Nickson, Judith},
  title    = {{High Prevalence of Prospective Memory Impairment in the Elderly and in Early-stage Dementia: Findings from a Population-based Study}},
  journal  = {Applied Cognitive Psychology},
  volume   = {14},
  number   = {7},
  pages    = {63--81},
  year     = {2000},
  doi      = {10.1002/acp.771},
  issn     = {0888-4080},
  url      = {http://doi.wiley.com/10.1002/acp.771},
  abstract = {Remembering to carry out intended actions is important for the effective performance of daily activities, but generally declines with age. This study aimed to establish the prevalence of prospective memory impairment in the elderly population, describe the age function and identify risk factors for impairment. An event-based prospective memory test was administered to 11,956 participants aged 65 + in the screening stage of a population-based study, the Medical Research Council Cognitive Function and Ageing Study (MRC CFAS). Only 54\% of the sample succeeded on the task. Logistic regression analysis showed that successful performance was strongly and linearly related to age, and that male gender, less education and lower social status substantially increased the risk of prospective memory impairment. There was a very high prevalence of prospective memory impairment in 388 individuals with very mild dementia, of whom only 8\% succeeded on the task. These findings raise concerns about the well-being and safety of many older people.},
}
@inproceedings{Goodman2002,
address = {London, UK},
author = {Goodman, Joy and Brewster, Steve and Gray, Phil},
booktitle = {Proceedings of the 16th British HCI Conference},
file = {:D$\backslash$:/\_Docs/mendeleyMain/Memory Aids for Older People - Goodman et al. - 2002.pdf:pdf},
keywords = {context-aware,handhelds,memory,older people},
title = {{Memory Aids for Older People}},
year = {2002}
}
@incollection{Kapura,
abstract = {This chapter reviews external memory aids and computer-based resources in the management of patients with memory difficulties following brain disease or brain injury. We are concerned primarily with treatment interventions for memory-disordered people. Although various forms of memory aids and computer-based resources may be useful in the initial neuropsychological assessment and diagnosis of such people, these applications are outside the scope of this chapter. We focus mainly on adults who have suffered a brain insult, although we recognize that children with brain damage and those with memory loss related to psychiatric conditions might also benefit from external aids and computers.},
address = {Chichester, UK},
author = {Kapur, Narinder and Glisky, E. L. and Wilson, Barbara A.},
booktitle = {The Essential Handbook of Memory Disorders},
edition = {Second},
editor = {Baddeley, A. D. and Kopelman, M. D. and Wilson, B. A.},
keywords = {Memory aids,Memory rehabilitation,Treatment interventions},
pages = {757--784},
publisher = {John Wiley \& Sons},
title = {{External Memory Aids and Computers in Memory Rehabilitation}},
year = {2002}
}
@article{Castro2008,
  author   = {Castro, Luis A. and Favela, Jesus},
  title    = {{Reducing the Uncertainty on Location Estimation of Mobile Users to Support Hospital Work}},
  journal  = {IEEE Transactions on Systems, Man, and Cybernetics, Part C: Applications and Reviews},
  volume   = {38},
  number   = {6},
  pages    = {861--866},
  month    = nov,
  year     = {2008},
  doi      = {10.1109/TSMCC.2008.2001572},
  issn     = {1094-6977},
  url      = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4656567},
  abstract = {The nature of a context-aware application in hospital work demands a reliable and accurate location system. The activity for which this location information is needed determines to a great extent the relevancy of this contextual variable, since a minor error in delivering patient-based information can be critical. In this correspondence, we present an enhanced technique to infer the location of users in a hospital setting based on the strength of radio-frequency signals received by mobile devices that are used to train a neural network. The approach uses the neighbors surrounding the location to be estimated to track users continuously. This neighborhood eases the training and is used to simulate previous time instant guesses to reduce the location estimation error and alleviate the hopping trajectories of users. The results obtained by using this approach are in the order of 1.3 m for the average distance error during continuous motion.},
}
