%% This BibTeX bibliography file was created using BibDesk.
%% http://bibdesk.sourceforge.net/


%% Created for John Daigle at 2011-03-13 21:45:08 -0400 


%% Saved with string encoding Unicode (UTF-8) 



@inproceedings{1103933,
	Abstract = { The graphics processor (GPU) on today's commodity video cards has evolved into an extremely powerful and flexible processor. The latest graphics architectures provide tremendous memory bandwidth and computational horsepower, with fully programmable vertex and pixel processing units that support vector operations up to full IEEE floating point precision. High level languages have emerged for graphics hardware, making this computational power accessible. Architecturally, GPUs are highly parallel streaming processors optimized for vector operations, with both MIMD (vertex) and SIMD (pixel) pipelines. Not surprisingly, these processors are capable of general-purpose computation beyond the graphics applications for which they were designed. Researchers have found that exploiting the GPU can accelerate some problems by over an order of magnitude over the CPU. However, significant barriers still exist for the developer who wishes to use the inexpensive power of commodity graphics hardware, whether for in-game simulation of physics or for conventional computational science. These chips are designed for and driven by video game development; the programming model is unusual, the programming environment is tightly constrained, and the underlying architectures are largely secret. The GPU developer must be an expert in computer graphics and its computational idioms to make effective use of the hardware, and still pitfalls abound. This course provides a detailed introduction to general purpose computation on graphics hardware (GPGPU). We emphasize core computational building blocks, ranging from linear algebra to database queries, and review the tools, perils, and tricks of the trade in GPU programming. 
Finally we present some interesting and important case studies on general-purpose applications of graphics hardware. The course presenters are experts on general-purpose GPU computation from academia and industry, and have presented papers and tutorials on the topic at SIGGRAPH, Graphics Hardware, Game Developers Conference, and elsewhere. },
	Address = {New York, NY, USA},
	Author = {Luebke, David and Harris, Mark and Kr{\"u}ger, Jens and Purcell, Tim and Govindaraju, Naga and Buck, Ian and Woolley, Cliff and Lefohn, Aaron},
	Bdsk-Color = {4285517823},
	Booktitle = {SIGGRAPH '04: ACM SIGGRAPH 2004 Course Notes},
	Date-Added = {2011-03-13 21:44:22 -0400},
	Date-Modified = {2011-03-13 21:44:22 -0400},
	Doi = {10.1145/1103900.1103933},
	Keywords = {simulation},
	Location = {Los Angeles, CA},
	Pages = {33},
	Publisher = {ACM},
	Title = {{GPGPU}: General Purpose Computation on Graphics Hardware},
	Year = {2004},
	Bdsk-File-1 = {YnBsaXN0MDDUAQIDBAUIJidUJHRvcFgkb2JqZWN0c1gkdmVyc2lvblkkYXJjaGl2ZXLRBgdUcm9vdIABqAkKFRYXGyIjVSRudWxs0wsMDQ4RFFpOUy5vYmplY3RzV05TLmtleXNWJGNsYXNzog8QgASABqISE4ACgAOAB1lhbGlhc0RhdGFccmVsYXRpdmVQYXRo0hgNGRpXTlMuZGF0YU8RAXoAAAAAAXoAAgAADE1hY2ludG9zaCBIRAAAAAAAAAAAAAAAAAAAAMarQ7ZIKwAAAAl6oQsxMTAzOTMzLnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACXrxxlc6OwAAAAAAAAAAAAQAAgAACSAAAAAAAAAAAAAAAAAAAAAGQmlibGlvABAACAAAxqt79gAAABEACAAAxldyewAAAAEAEAAJeqEACXoTAAiZSgAAkOcAAgA0TWFjaW50b3NoIEhEOlVzZXJzOnBhdWw6RG9jdW1lbnRzOkJpYmxpbzoxMTAzOTMzLnBkZgAOABgACwAxADEAMAAzADkAMwAzAC4AcABkAGYADwAaAAwATQBhAGMAaQBuAHQAbwBzAGgAIABIAEQAEgAnVXNlcnMvcGF1bC9Eb2N1bWVudHMvQmlibGlvLzExMDM5MzMucGRmAAATAAEvAAAVAAIAC///AACABdIcHR4fWCRjbGFzc2VzWiRjbGFzc25hbWWjHyAhXU5TTXV0YWJsZURhdGFWTlNEYXRhWE5TT2JqZWN0XxAeLi4vLi4vLi4vLi4vQmlibGlvLzExMDM5MzMucGRm0hwdJCWiJSFcTlNEaWN0aW9uYXJ5EgABhqBfEA9OU0tleWVkQXJjaGl2ZXIACAARABYAHwAoADIANQA6ADwARQBLAFIAXQBlAGwAbwBxAHMAdgB4AHoAfACGAJMAmACgAh4CIAIlAi4COQI9AksCUgJbAnwCgQKEApEClgAAAAAAAAIBAAAAAAAAACgAAAAAAAAAAAAAAAAAAAKo},
	Bdsk-Url-1 = {http://doi.acm.org/10.1145/1103900.1103933}}

@inproceedings{Buck:2004:BGS:1186562.1015800,
	Abstract = {In this paper, we present Brook for GPUs, a system for general-purpose computation on programmable graphics hardware. Brook extends C to include simple data-parallel constructs, enabling the use of the GPU as a streaming co-processor. We present a compiler and runtime system that abstracts and virtualizes many aspects of graphics hardware. In addition, we present an analysis of the effectiveness of the GPU as a compute engine compared to the CPU, to determine when the GPU can outperform the CPU for a particular algorithm. We evaluate our system with five applications, the SAXPY and SGEMV BLAS operators, image segmentation, FFT, and ray tracing. For these applications, we demonstrate that our Brook implementations perform comparably to hand-written GPU code and up to seven times faster than their CPU counterparts.},
	Acmid = {1015800},
	Address = {New York, NY, USA},
	Author = {Buck, Ian and Foley, Tim and Horn, Daniel and Sugerman, Jeremy and Fatahalian, Kayvon and Houston, Mike and Hanrahan, Pat},
	Bdsk-Color = {4285517823},
	Booktitle = {ACM SIGGRAPH 2004 Papers},
	Date-Added = {2011-03-13 21:44:22 -0400},
	Date-Modified = {2011-03-13 21:44:22 -0400},
	Doi = {10.1145/1186562.1015800},
	Keywords = {Data Parallel Computing, GPU Computing, Brook, Programmable Graphics Hardware, Stream Computing},
	Location = {Los Angeles, California},
	Numpages = {10},
	Pages = {777--786},
	Publisher = {ACM},
	Series = {SIGGRAPH '04},
	Title = {Brook for {GPUs}: Stream Computing on Graphics Hardware},
	Url = {http://doi.acm.org/10.1145/1186562.1015800},
	Year = {2004},
	Bdsk-Url-1 = {http://doi.acm.org/10.1145/1186562.1015800}}

@inproceedings{1090126,
	Abstract = { With the introduction in 2003 of standard GPUs with 32 bit floating point numbers and programmable Vertex and Fragment processors, the processing power of the GPU was made available to non-graphics applications. As the GPU is aimed at computer graphics, the concepts in GPU-programming are based on computer graphics terminology, and the strategies for programming have to be based on the architecture of the graphics pipeline. At SINTEF in Norway a 4-year strategic institute project (2004-2007) "Graphics hardware as a high-end computational resource", http://www.math.sintef.no/gpu/ aims at making GPUs available as a computational resource both to academia and industry. This paper addresses the challenges of GPU-programming and results of the project's first year. },
	Address = {New York, NY, USA},
	Author = {Dokken, Tor and Hagen, Trond R. and Hjelmervik, Jon M.},
	Bdsk-Color = {4285517823},
	Booktitle = {SCCG '05: Proceedings of the 21st spring conference on Computer graphics},
	Date-Added = {2011-03-13 21:44:22 -0400},
	Date-Modified = {2011-03-13 21:44:22 -0400},
	Doi = {10.1145/1090122.1090126},
	Isbn = {1-59593-203-6},
	Keywords = {GPU, geometry, linear algebra, partial differential equations},
	Location = {Budmerice, Slovakia},
	Pages = {21--26},
	Publisher = {ACM},
	Title = {The {GPU} as a High Performance Computational Resource},
	Year = {2005},
	Bdsk-File-1 = {YnBsaXN0MDDUAQIDBAUIJidUJHRvcFgkb2JqZWN0c1gkdmVyc2lvblkkYXJjaGl2ZXLRBgdUcm9vdIABqAkKFRYXGyIjVSRudWxs0wsMDQ4RFFpOUy5vYmplY3RzV05TLmtleXNWJGNsYXNzog8QgASABqISE4ACgAOAB1lhbGlhc0RhdGFccmVsYXRpdmVQYXRo0hgNGRpXTlMuZGF0YU8RAYYAAAAAAYYAAgAADE1hY2ludG9zaCBIRAAAAAAAAAAAAAAAAAAAAMarQ7ZIKwAAAAl6oQ5wMjEtZG9ra2VuLnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACXvwxlF7OQAAAAAAAAAAAAQAAgAACSAAAAAAAAAAAAAAAAAAAAAGQmlibGlvABAACAAAxqt79gAAABEACAAAxlGzeQAAAAEAEAAJeqEACXoTAAiZSgAAkOcAAgA3TWFjaW50b3NoIEhEOlVzZXJzOnBhdWw6RG9jdW1lbnRzOkJpYmxpbzpwMjEtZG9ra2VuLnBkZgAADgAeAA4AcAAyADEALQBkAG8AawBrAGUAbgAuAHAAZABmAA8AGgAMAE0AYQBjAGkAbgB0AG8AcwBoACAASABEABIAKlVzZXJzL3BhdWwvRG9jdW1lbnRzL0JpYmxpby9wMjEtZG9ra2VuLnBkZgATAAEvAAAVAAIAC///AACABdIcHR4fWCRjbGFzc2VzWiRjbGFzc25hbWWjHyAhXU5TTXV0YWJsZURhdGFWTlNEYXRhWE5TT2JqZWN0XxAhLi4vLi4vLi4vLi4vQmlibGlvL3AyMS1kb2trZW4ucGRm0hwdJCWiJSFcTlNEaWN0aW9uYXJ5EgABhqBfEA9OU0tleWVkQXJjaGl2ZXIACAARABYAHwAoADIANQA6ADwARQBLAFIAXQBlAGwAbwBxAHMAdgB4AHoAfACGAJMAmACgAioCLAIxAjoCRQJJAlcCXgJnAosCkAKTAqACpQAAAAAAAAIBAAAAAAAAACgAAAAAAAAAAAAAAAAAAAK3},
	Bdsk-Url-1 = {http://doi.acm.org/10.1145/1090122.1090126}}

@inproceedings{1360618,
	Abstract = {We present BSGP, a new programming language for general purpose computation on the GPU. A BSGP program looks much the same as a sequential C program. Programmers only need to supply a bare minimum of extra information to describe parallel processing on GPUs. As a result, BSGP programs are easy to read, write, and maintain. Moreover, the ease of programming does not come at the cost of performance. A well-designed BSGP compiler converts BSGP programs to kernels and combines them using optimally allocated temporary streams. In our benchmark, BSGP programs achieve similar or better performance than well-optimized CUDA programs, while the source code complexity and programming time are significantly reduced. To test BSGP's code efficiency and ease of programming, we implemented a variety of GPU applications, including a highly sophisticated X3D parser that would be extremely difficult to develop with existing GPU programming languages.},
	Address = {New York, NY, USA},
	Annote = {The authors present a new programming language for implementing a Bulk Synchronous Parallel model on a GPU. They claim that their language is simpler than CUDA, faster for development, and provides performance benefits as well. The paper describes the language and shows some code examples for basic algorithms in both BSGP and CUDA. In addition, they provide the results of experiments which support their assertions of performance enhancement and code simplicity.},
	Author = {Hou, Qiming and Zhou, Kun and Guo, Baining},
	Bdsk-Color = {4285517823},
	Booktitle = {SIGGRAPH '08: ACM SIGGRAPH 2008 papers},
	Date-Added = {2011-03-13 21:44:22 -0400},
	Date-Modified = {2011-03-13 21:44:22 -0400},
	Doi = {10.1145/1399504.1360618},
	Isbn = {978-1-4503-0112-1},
	Keywords = {bulk synchronous parallel programming, programable graphics hardware, stream processing, thread manipulation},
	Location = {Los Angeles, California},
	Pages = {1--12},
	Publisher = {ACM},
	Title = {{BSGP}: Bulk-Synchronous {GPU} Programming},
	Year = {2008},
	Bdsk-File-1 = {YnBsaXN0MDDUAQIDBAUIJidUJHRvcFgkb2JqZWN0c1gkdmVyc2lvblkkYXJjaGl2ZXLRBgdUcm9vdIABqAkKFRYXGyIjVSRudWxs0wsMDQ4RFFpOUy5vYmplY3RzV05TLmtleXNWJGNsYXNzog8QgASABqISE4ACgAOAB1lhbGlhc0RhdGFccmVsYXRpdmVQYXRo0hgNGRpXTlMuZGF0YU8RAWwAAAAAAWwAAgAADE1hY2ludG9zaCBIRAAAAAAAAAAAAAAAAAAAAMarQ7ZIKwAAAAub7gthMTktaG91LnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOkV1x/S1wAAAAAAAAAAAAAUAAgAACSAAAAAAAAAAAAAAAAAAAAAJRG93bmxvYWRzAAAQAAgAAMare/YAAAARAAgAAMf07gAAAAABAAwAC5vuAAiZSgAAkOcAAgAtTWFjaW50b3NoIEhEOlVzZXJzOnBhdWw6RG93bmxvYWRzOmExOS1ob3UucGRmAAAOABgACwBhADEAOQAtAGgAbwB1AC4AcABkAGYADwAaAAwATQBhAGMAaQBuAHQAbwBzAGgAIABIAEQAEgAgVXNlcnMvcGF1bC9Eb3dubG9hZHMvYTE5LWhvdS5wZGYAEwABLwAAFQACAAv//wAAgAXSHB0eH1gkY2xhc3Nlc1okY2xhc3NuYW1lox8gIV1OU011dGFibGVEYXRhVk5TRGF0YVhOU09iamVjdF8QJC4uLy4uLy4uLy4uLy4uL0Rvd25sb2Fkcy9hMTktaG91LnBkZtIcHSQloiUhXE5TRGljdGlvbmFyeRIAAYagXxAPTlNLZXllZEFyY2hpdmVyAAgAEQAWAB8AKAAyADUAOgA8AEUASwBSAF0AZQBsAG8AcQBzAHYAeAB6AHwAhgCTAJgAoAIQAhICFwIgAisCLwI9AkQCTQJ0AnkCfAKJAo4AAAAAAAACAQAAAAAAAAAoAAAAAAAAAAAAAAAAAAACoA==},
	Bdsk-Url-1 = {http://doi.acm.org/10.1145/1399504.1360618}}

@inproceedings{1555299,
	Abstract = {The availability of low cost powerful parallel graphics cards has stimulated the port of Genetic Programming (GP) on Graphics Processing Units (GPUs). Our work focuses on the possibilities offered by Nvidia G80 GPUs when programmed in the CUDA language. We compare two parallelization schemes that evaluate several GP programs in parallel. We show that the fine grain distribution of computations over the elementary processors greatly impacts performances. We also present memory and representation optimizations that further enhance computation speed, up to 2.8 billion GP operations per second. The code has been developed with the well known ECJ library.},
	Address = {New York, NY, USA},
	Author = {Robilliard, Denis and Marion, Virginie and Fonlupt, Cyril},
	Bdsk-Color = {4285517823},
	Booktitle = {BADS '09: Proceedings of the 2009 workshop on Bio-inspired algorithms for distributed systems},
	Date-Added = {2011-03-13 21:44:22 -0400},
	Date-Modified = {2011-03-13 21:44:22 -0400},
	Doi = {10.1145/1555284.1555299},
	Isbn = {978-1-60558-584-0},
	Keywords = {genetic algorithms, genetic programming, graphics processing units, parallel processing},
	Location = {Barcelona, Spain},
	Pages = {85--94},
	Publisher = {ACM},
	Title = {High Performance Genetic Programming on {GPU}},
	Year = {2009},
	Bdsk-File-1 = {YnBsaXN0MDDUAQIDBAUIJidUJHRvcFgkb2JqZWN0c1gkdmVyc2lvblkkYXJjaGl2ZXLRBgdUcm9vdIABqAkKFRYXGyIjVSRudWxs0wsMDQ4RFFpOUy5vYmplY3RzV05TLmtleXNWJGNsYXNzog8QgASABqISE4ACgAOAB1lhbGlhc0RhdGFccmVsYXRpdmVQYXRo0hgNGRpXTlMuZGF0YU8RAZYAAAAAAZYAAgAADE1hY2ludG9zaCBIRAAAAAAAAAAAAAAAAAAAAMarQ7ZIKwAAAAl6oRJwODUtcm9iaWxsaWFyZC5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYUglyE1HjwAAAAAAAAAAAAQAAgAACSAAAAAAAAAAAAAAAAAAAAAGQmlibGlvABAACAAAxqt79gAAABEACAAAyE1/zwAAAAEAEAAJeqEACXoTAAiZSgAAkOcAAgA7TWFjaW50b3NoIEhEOlVzZXJzOnBhdWw6RG9jdW1lbnRzOkJpYmxpbzpwODUtcm9iaWxsaWFyZC5wZGYAAA4AJgASAHAAOAA1AC0AcgBvAGIAaQBsAGwAaQBhAHIAZAAuAHAAZABmAA8AGgAMAE0AYQBjAGkAbgB0AG8AcwBoACAASABEABIALlVzZXJzL3BhdWwvRG9jdW1lbnRzL0JpYmxpby9wODUtcm9iaWxsaWFyZC5wZGYAEwABLwAAFQACAAv//wAAgAXSHB0eH1gkY2xhc3Nlc1okY2xhc3NuYW1lox8gIV1OU011dGFibGVEYXRhVk5TRGF0YVhOU09iamVjdF8QJS4uLy4uLy4uLy4uL0JpYmxpby9wODUtcm9iaWxsaWFyZC5wZGbSHB0kJaIlIVxOU0RpY3Rpb25hcnkSAAGGoF8QD05TS2V5ZWRBcmNoaXZlcgAIABEAFgAfACgAMgA1ADoAPABFAEsAUgBdAGUAbABvAHEAcwB2AHgAegB8AIYAkwCYAKACOgI8AkECSgJVAlkCZwJuAncCnwKkAqcCtAK5AAAAAAAAAgEAAAAAAAAAKAAAAAAAAAAAAAAAAAAAAss=},
	Bdsk-Url-1 = {http://doi.acm.org/10.1145/1555284.1555299}}

@inproceedings{1504194,
	Abstract = {GPGPUs have recently emerged as powerful vehicles for general-purpose high-performance computing. Although a new Compute Unified Device Architecture (CUDA) programming model from NVIDIA offers improved programmability for general computing, programming GPGPUs is still complex and error-prone. This paper presents a compiler framework for automatic source-to-source translation of standard OpenMP applications into CUDA-based GPGPU applications. The goal of this translation is to further improve programmability and make existing OpenMP applications amenable to execution on GPGPUs. In this paper, we have identified several key transformation techniques, which enable efficient GPU global memory access, to achieve high performance. Experimental results from two important kernels (JACOBI and SPMUL) and two NAS OpenMP Parallel Benchmarks (EP and CG) show that the described translator and compile-time optimizations work well on both regular and irregular applications, leading to performance improvements of up to 50X over the unoptimized translation (up to 328X over serial).},
	Address = {New York, NY, USA},
	Author = {Lee, Seyong and Min, Seung-Jai and Eigenmann, Rudolf},
	Bdsk-Color = {4285517823},
	Booktitle = {PPoPP '09: Proceedings of the 14th ACM SIGPLAN symposium on Principles and practice of parallel programming},
	Date-Added = {2011-03-13 21:44:22 -0400},
	Date-Modified = {2011-03-13 21:44:22 -0400},
	Doi = {10.1145/1504176.1504194},
	Isbn = {978-1-60558-397-6},
	Keywords = {automatic translation, compiler optimization, cuda, gpu, openmp},
	Location = {Raleigh, NC, USA},
	Pages = {101--110},
	Publisher = {ACM},
	Title = {{OpenMP} to {GPGPU}: A Compiler Framework for Automatic Translation and Optimization},
	Year = {2009},
	Bdsk-File-1 = {YnBsaXN0MDDUAQIDBAUIJidUJHRvcFgkb2JqZWN0c1gkdmVyc2lvblkkYXJjaGl2ZXLRBgdUcm9vdIABqAkKFRYXGyIjVSRudWxs0wsMDQ4RFFpOUy5vYmplY3RzV05TLmtleXNWJGNsYXNzog8QgASABqISE4ACgAOAB1lhbGlhc0RhdGFccmVsYXRpdmVQYXRo0hgNGRpXTlMuZGF0YU8RAX4AAAAAAX4AAgAADE1hY2ludG9zaCBIRAAAAAAAAAAAAAAAAAAAAMarQ7ZIKwAAAAl6oQxwMTAxLWxlZS5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQBfcyAh+HwAAAAAAAAAAAAQAAgAACSAAAAAAAAAAAAAAAAAAAAAGQmlibGlvABAACAAAxqt79gAAABEACAAAyAi2XwAAAAEAEAAJeqEACXoTAAiZSgAAkOcAAgA1TWFjaW50b3NoIEhEOlVzZXJzOnBhdWw6RG9jdW1lbnRzOkJpYmxpbzpwMTAxLWxlZS5wZGYAAA4AGgAMAHAAMQAwADEALQBsAGUAZQAuAHAAZABmAA8AGgAMAE0AYQBjAGkAbgB0AG8AcwBoACAASABEABIAKFVzZXJzL3BhdWwvRG9jdW1lbnRzL0JpYmxpby9wMTAxLWxlZS5wZGYAEwABLwAAFQACAAv//wAAgAXSHB0eH1gkY2xhc3Nlc1okY2xhc3NuYW1lox8gIV1OU011dGFibGVEYXRhVk5TRGF0YVhOU09iamVjdF8QHy4uLy4uLy4uLy4uL0JpYmxpby9wMTAxLWxlZS5wZGbSHB0kJaIlIVxOU0RpY3Rpb25hcnkSAAGGoF8QD05TS2V5ZWRBcmNoaXZlcgAIABEAFgAfACgAMgA1ADoAPABFAEsAUgBdAGUAbABvAHEAcwB2AHgAegB8AIYAkwCYAKACIgIkAikCMgI9AkECTwJWAl8CgQKGAokClgKbAAAAAAAAAgEAAAAAAAAAKAAAAAAAAAAAAAAAAAAAAq0=},
	Bdsk-Url-1 = {http://doi.acm.org/10.1145/1504176.1504194}}

@inproceedings{5289129,
	Abstract = {We present and analyze two new communication libraries, cudaMPI and glMPI, that provide an MPI-like message passing interface to communicate data stored on the graphics cards of a distributed-memory parallel computer. These libraries can help applications that perform general purpose computations on these networked GPU clusters. We explore how to efficiently support both point-to-point and collective communication for either contiguous or noncontiguous data on modern graphics cards. Our software design is informed by a detailed analysis of the actual performance of modern graphics hardware, for which we develop and test a simple but useful performance model.},
	Annote = {The authors present two new MPI libraries for cuda. Message passing is primarily handled by the CPU. The authors' justification for this choice is that message startup dominates message passing, so that using the GPU's capacity to send small messages would create communications bottlenecks avoidable by passing messages in large bundles through the CPU.

Experiments were conducted pitting each library against the other, but no comparisons were made between the MPI libraries and standard cuda implementation.},
	Author = {Lawlor, O.S.},
	Bdsk-Color = {4285517823},
	Booktitle = {Cluster Computing and Workshops, 2009. CLUSTER '09. IEEE International Conference on},
	Date-Added = {2011-03-13 21:44:22 -0400},
	Date-Modified = {2011-03-13 21:44:22 -0400},
	Doi = {10.1109/CLUSTR.2009.5289129},
	Issn = {1552-5244},
	Keywords = {GPGPU clusters;communication libraries;cudaMPI;distributed-memory parallel computer;glMPI;graphics cards;message passing interface;software design;computer graphics;message passing;parallel processing;},
	Month = aug # "--" # sep,
	Pages = {1--8},
	Publisher = {IEEE},
	Title = {Message Passing for {GPGPU} Clusters: {CudaMPI}},
	Year = {2009},
	Bdsk-File-1 = {YnBsaXN0MDDUAQIDBAUIJidUJHRvcFgkb2JqZWN0c1gkdmVyc2lvblkkYXJjaGl2ZXLRBgdUcm9vdIABqAkKFRYXGyIjVSRudWxs0wsMDQ4RFFpOUy5vYmplY3RzV05TLmtleXNWJGNsYXNzog8QgASABqISE4ACgAOAB1lhbGlhc0RhdGFccmVsYXRpdmVQYXRo0hgNGRpXTlMuZGF0YU8RAXoAAAAAAXoAAgAADE1hY2ludG9zaCBIRAAAAAAAAAAAAAAAAAAAAMarQ7ZIKwAAAAl6oQs1Mjg5MTI5LnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOwwxx/e15wAAAAAAAAAAAAQAAgAACSAAAAAAAAAAAAAAAAAAAAAGQmlibGlvABAACAAAxqt79gAAABEACAAAx/fuJwAAAAEAEAAJeqEACXoTAAiZSgAAkOcAAgA0TWFjaW50b3NoIEhEOlVzZXJzOnBhdWw6RG9jdW1lbnRzOkJpYmxpbzo1Mjg5MTI5LnBkZgAOABgACwA1ADIAOAA5ADEAMgA5AC4AcABkAGYADwAaAAwATQBhAGMAaQBuAHQAbwBzAGgAIABIAEQAEgAnVXNlcnMvcGF1bC9Eb2N1bWVudHMvQmlibGlvLzUyODkxMjkucGRmAAATAAEvAAAVAAIAC///AACABdIcHR4fWCRjbGFzc2VzWiRjbGFzc25hbWWjHyAhXU5TTXV0YWJsZURhdGFWTlNEYXRhWE5TT2JqZWN0XxAeLi4vLi4vLi4vLi4vQmlibGlvLzUyODkxMjkucGRm0hwdJCWiJSFcTlNEaWN0aW9uYXJ5EgABhqBfEA9OU0tleWVkQXJjaGl2ZXIACAARABYAHwAoADIANQA6ADwARQBLAFIAXQBlAGwAbwBxAHMAdgB4AHoAfACGAJMAmACgAh4CIAIlAi4COQI9AksCUgJbAnwCgQKEApEClgAAAAAAAAIBAAAAAAAAACgAAAAAAAAAAAAAAAAAAAKo},
	Bdsk-Url-1 = {http://dx.doi.org/10.1109/CLUSTR.2009.5289129}}

@inproceedings{5161065,
	Abstract = {This paper explores the challenges in implementing a message passing interface usable on systems with data-parallel processors. As a case study, we design and implement the ldquoDCGNrdquo API on NVIDIA GPUs that is similar to MPI and allows full access to the underlying architecture. We introduce the notion of data-parallel thread-groups as a way to map resources to MPI ranks. We use a method that also allows the data-parallel processors to run autonomously from user-written CPU code. In order to facilitate communication, we use a sleep-based polling system to store and retrieve messages. Unlike previous systems, our method provides both performance and flexibility. By running a test suite of applications with different communication requirements, we find that a tolerable amount of overhead is incurred, somewhere between one and five percent depending on the application, and indicate the locations where this overhead accumulates. We conclude that with innovations in chipsets and drivers, this overhead will be mitigated and provide similar performance to typical CPU-based MPI implementations while providing fully-dynamic communication.},
	Author = {Stuart, J.A. and Owens, J.D.},
	Bdsk-Color = {4285517823},
	Booktitle = {Parallel Distributed Processing, 2009. IPDPS 2009. IEEE International Symposium on},
	Date-Added = {2011-03-13 21:44:22 -0400},
	Date-Modified = {2011-03-13 21:44:22 -0400},
	Doi = {10.1109/IPDPS.2009.5161065},
	Issn = {1530-2075},
	Keywords = {DCGN API;NVIDIA GPU;data-parallel architecture;data-parallel thread-group;message passing interface;sleep-based polling system;message passing;parallel processing;},
	Month = may,
	Pages = {1--12},
	Publisher = {IEEE},
	Title = {Message Passing on Data-Parallel Architectures},
	Year = {2009},
	Bdsk-Url-1 = {http://dx.doi.org/10.1109/IPDPS.2009.5161065},
	Bdsk-File-1 = {YnBsaXN0MDDUAQIDBAUIJidUJHRvcFgkb2JqZWN0c1gkdmVyc2lvblkkYXJjaGl2ZXLRBgdUcm9vdIABqAkKFRYXGyIjVSRudWxs0wsMDQ4RFFpOUy5vYmplY3RzV05TLmtleXNWJGNsYXNzog8QgASABqISE4ACgAOAB1lhbGlhc0RhdGFccmVsYXRpdmVQYXRo0hgNGRpXTlMuZGF0YU8RAX4AAAAAAX4AAgAADE1hY2ludG9zaCBIRAAAAAAAAAAAAAAAAAAAAMarQ7ZIKwAAAAl6oQwwNTE2MTA2NS5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACLmEhyaLk2gAAAAAAAAAAAAQAAgAACSAAAAAAAAAAAAAAAAAAAAAGQmlibGlvABAACAAAxqt79gAAABEACAAAyaMdGgAAAAEAEAAJeqEACXoTAAiZSgAAkOcAAgA1TWFjaW50b3NoIEhEOlVzZXJzOnBhdWw6RG9jdW1lbnRzOkJpYmxpbzowNTE2MTA2NS5wZGYAAA4AGgAMADAANQAxADYAMQAwADYANQAuAHAAZABmAA8AGgAMAE0AYQBjAGkAbgB0AG8AcwBoACAASABEABIAKFVzZXJzL3BhdWwvRG9jdW1lbnRzL0JpYmxpby8wNTE2MTA2NS5wZGYAEwABLwAAFQACAAv//wAAgAXSHB0eH1gkY2xhc3Nlc1okY2xhc3NuYW1lox8gIV1OU011dGFibGVEYXRhVk5TRGF0YVhOU09iamVjdF8QHy4uLy4uLy4uLy4uL0JpYmxpby8wNTE2MTA2NS5wZGbSHB0kJaIlIVxOU0RpY3Rpb25hcnkSAAGGoF8QD05TS2V5ZWRBcmNoaXZlcgAIABEAFgAfACgAMgA1ADoAPABFAEsAUgBdAGUAbABvAHEAcwB2AHgAegB8AIYAkwCYAKACIgIkAikCMgI9AkECTwJWAl8CgQKGAokClgKbAAAAAAAAAgEAAAAAAAAAKAAAAAAAAAAAAAAAAAAAAq0=}}

@inproceedings{5429842,
	Abstract = {Compute Unified Device Architecture (CUDA) programmed,Graphic Processing Units (GPUs) are rapidly becoming a major choice in high performance computing. Hence, the number of applications ported to the CUDA platform is growing high. Message Passing Interface(MPI) has been the choice of high performance computing for more than a decade and it has proven its capability in delivering higher performance in parallel applications. CUDA and MPI use different programming approaches but both of them depend on the inherent parallelism of the application to be effective. However, much less research had been carried out to evaluate the performance when CUDA is integrated with other parallel programming paradigms. This paper investigates on integration of these capabilities of both programming approaches and how we can achieve superior performance in general purpose applications. Thus, we have experimented CUDA+MPI programming approach with two well-known algorithms (Strassens Algorithm \& Conjugate Gradient Algorithm) and shown how we can achieve higher performance by means of using MPI as computation distributing mechanism and CUDA as the main execution engine. We have developed a general purpose matrix multiplication algorithm and a Conjugate Gradient algorithm using CUDA and MPI. In this approach, MPI functions as the data distributing mechanism between the GPU nodes and CUDA as the main computing engine. This allows the programmer to connect GPU nodes via high speed Ethernet without special technologies. Thus, the programmer is enabled to view each GPU node separately as they are and to execute different components of a program in several GPU nodes.},
	Author = {Karunadasa, N.P. and Ranasinghe, D.N.},
	Bdsk-Color = {4285517823},
	Booktitle = {Industrial and Information Systems (ICIIS), 2009 International Conference on},
	Date-Added = {2011-03-13 21:44:22 -0400},
	Date-Modified = {2011-03-13 21:44:22 -0400},
	Doi = {10.1109/ICIINFS.2009.5429842},
	Keywords = {Ethernet;computation distributing mechanism;compute unified device architecture;conjugate gradient algorithm;graphic processing units;high performance computing;matrix multiplication algorithm;message passing interface;parallel programming paradigms;strassens algorithm;computer graphics;conjugate gradient methods;coprocessors;local area networks;matrix multiplication;message passing;parallel architectures;parallel programming;},
	Month = dec,
	Pages = {331--336},
	Publisher = {IEEE},
	Title = {Accelerating High Performance Applications with {CUDA} and {MPI}},
	Year = {2009},
	Bdsk-Url-1 = {http://dx.doi.org/10.1109/ICIINFS.2009.5429842},
	Bdsk-File-1 = {YnBsaXN0MDDUAQIDBAUIJidUJHRvcFgkb2JqZWN0c1gkdmVyc2lvblkkYXJjaGl2ZXLRBgdUcm9vdIABqAkKFRYXGyIjVSRudWxs0wsMDQ4RFFpOUy5vYmplY3RzV05TLmtleXNWJGNsYXNzog8QgASABqISE4ACgAOAB1lhbGlhc0RhdGFccmVsYXRpdmVQYXRo0hgNGRpXTlMuZGF0YU8RAX4AAAAAAX4AAgAADE1hY2ludG9zaCBIRAAAAAAAAAAAAAAAAAAAAMarQ7ZIKwAAAAl6oQwwNTQyOTg0Mi5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACLl4hyaLc0gAAAAAAAAAAAAQAAgAACSAAAAAAAAAAAAAAAAAAAAAGQmlibGlvABAACAAAxqt79gAAABEACAAAyaMVEgAAAAEAEAAJeqEACXoTAAiZSgAAkOcAAgA1TWFjaW50b3NoIEhEOlVzZXJzOnBhdWw6RG9jdW1lbnRzOkJpYmxpbzowNTQyOTg0Mi5wZGYAAA4AGgAMADAANQA0ADIAOQA4ADQAMgAuAHAAZABmAA8AGgAMAE0AYQBjAGkAbgB0AG8AcwBoACAASABEABIAKFVzZXJzL3BhdWwvRG9jdW1lbnRzL0JpYmxpby8wNTQyOTg0Mi5wZGYAEwABLwAAFQACAAv//wAAgAXSHB0eH1gkY2xhc3Nlc1okY2xhc3NuYW1lox8gIV1OU011dGFibGVEYXRhVk5TRGF0YVhOU09iamVjdF8QHy4uLy4uLy4uLy4uL0JpYmxpby8wNTQyOTg0Mi5wZGbSHB0kJaIlIVxOU0RpY3Rpb25hcnkSAAGGoF8QD05TS2V5ZWRBcmNoaXZlcgAIABEAFgAfACgAMgA1ADoAPABFAEsAUgBdAGUAbABvAHEAcwB2AHgAegB8AIYAkwCYAKACIgIkAikCMgI9AkECTwJWAl8CgQKGAokClgKbAAAAAAAAAgEAAAAAAAAAKAAAAAAAAAAAAAAAAAAAAq0=}}

@inproceedings{1735698,
	Address = {New York, NY, USA},
	Annote = {Programmers for GPGPU face rapidly changing substrate of programming abstractions, execution models, and hardware implementations. It has been established, through numerous demonstrations for particular conjunctions of application kernel, programming languages, and GPU hardware instance, that it is possible to achieve significant improvements in the price/performance and energy/performance over general purpose processors. But these demonstrations are each the result of significant dedicated programmer labor, which is likely to be duplicated for each new GPU hardware architecture to achieve performance portability.

This paper discusses the implementation, in the R-Stream compiler, of a source to source mapping pathway from a high-level, textbook-style algorithm expression method in ANSI C, to multi-GPGPU accelerated computers. The compiler performs hierarchical decomposition and parallelization of the algorithm between and across host, multiple GPGPUs, and within-GPU. The semantic transformations are expressed within the polyhedral model, including optimization of integrated parallelization, locality, and contiguity tradeoffs. Hierarchical tiling is performed. Communication and synchronizations operations at multiple levels are generated automatically. The resulting mapping is currently emitted in the CUDA programming language.

The GPU backend adds to the range of hardware and accelerator targets for R-Stream and indicates the potential for performance portability of single sources across multiple hardware targets.},
	Author = {Leung, Allen and Vasilache, Nicolas and Meister, Beno{\^\i}t and Baskaran, Muthu and Wohlford, David and Bastoul, C{\'e}dric and Lethin, Richard},
	Bdsk-Color = {4285517823},
	Booktitle = {GPGPU '10: Proceedings of the 3rd Workshop on General-Purpose Computation on Graphics Processing Units},
	Date-Added = {2011-03-13 21:44:22 -0400},
	Date-Modified = {2011-03-13 21:44:22 -0400},
	Doi = {10.1145/1735688.1735698},
	Isbn = {978-1-60558-935-0},
	Keywords = {CUDA, GPGPU, automatic translation, compiler optimization, parallelization, polyhedral model},
	Location = {Pittsburgh, Pennsylvania},
	Pages = {51--61},
	Publisher = {ACM},
	Title = {A Mapping Path for Multi-{GPGPU} Accelerated Computers from a Portable High Level Programming Abstraction},
	Year = {2010},
	Bdsk-File-1 = {YnBsaXN0MDDUAQIDBAUIJidUJHRvcFgkb2JqZWN0c1gkdmVyc2lvblkkYXJjaGl2ZXLRBgdUcm9vdIABqAkKFRYXGyIjVSRudWxs0wsMDQ4RFFpOUy5vYmplY3RzV05TLmtleXNWJGNsYXNzog8QgASABqISE4ACgAOAB1lhbGlhc0RhdGFccmVsYXRpdmVQYXRo0hgNGRpXTlMuZGF0YU8RAYIAAAAAAYIAAgAADE1hY2ludG9zaCBIRAAAAAAAAAAAAAAAAAAAAMarQ7ZIKwAAAAl6oQ1wNTEtbGV1bmcucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQBg/yAh+xAAAAAAAAAAAAAQAAgAACSAAAAAAAAAAAAAAAAAAAAAGQmlibGlvABAACAAAxqt79gAAABEACAAAyAi3BAAAAAEAEAAJeqEACXoTAAiZSgAAkOcAAgA2TWFjaW50b3NoIEhEOlVzZXJzOnBhdWw6RG9jdW1lbnRzOkJpYmxpbzpwNTEtbGV1bmcucGRmAA4AHAANAHAANQAxAC0AbABlAHUAbgBnAC4AcABkAGYADwAaAAwATQBhAGMAaQBuAHQAbwBzAGgAIABIAEQAEgApVXNlcnMvcGF1bC9Eb2N1bWVudHMvQmlibGlvL3A1MS1sZXVuZy5wZGYAABMAAS8AABUAAgAL//8AAIAF0hwdHh9YJGNsYXNzZXNaJGNsYXNzbmFtZaMfICFdTlNNdXRhYmxlRGF0YVZOU0RhdGFYTlNPYmplY3RfECAuLi8uLi8uLi8uLi9CaWJsaW8vcDUxLWxldW5nLnBkZtIcHSQloiUhXE5TRGljdGlvbmFyeRIAAYagXxAPTlNLZXllZEFyY2hpdmVyAAgAEQAWAB8AKAAyADUAOgA8AEUASwBSAF0AZQBsAG8AcQBzAHYAeAB6AHwAhgCTAJgAoAImAigCLQI2AkECRQJTAloCYwKGAosCjgKbAqAAAAAAAAACAQAAAAAAAAAoAAAAAAAAAAAAAAAAAAACsg==},
	Bdsk-Url-1 = {http://doi.acm.org/10.1145/1735688.1735698}}
