@article{Achlioptas2007,
author = {Achlioptas, Dimitris and McSherry, Frank},
doi = {10.1145/1219092.1219097},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Achlioptas, Mcsherry - 2007 - Fast computation of low-rank matrix approximations.pdf:pdf},
issn = {0004-5411},
journal = {Journal of the ACM},
keywords = {Singular value decomposition,low rank approximation,sampling},
month = apr,
number = {2},
pages = {9--es},
title = {Fast Computation of Low-Rank Matrix Approximations},
url = {http://dl.acm.org/citation.cfm?id=1219092.1219097},
volume = {54},
year = {2007}
}
@article{Agarwal1994,
abstract = {In this paper, we propose a scheme for matrix-matrix multiplication on a distributed-memory parallel computer. The scheme hides almost all of the communication cost with the computation and uses the standard, optimized Level-3 BLAS operation on each node. As a result, the overall performance of the scheme is nearly equal to the performance of the Level-3 optimized BLAS operation times the number of nodes in the computer, which is the peak performance obtainable for parallel BLAS. Another feature of our algorithm is that it can give peak performance for larger matrices, even if the underlying communication network of the computer is slow.},
author = {Agarwal, Ramesh C. and Gustavson, Fred G. and Zubair, Mohammad},
doi = {10.1147/rd.386.0673},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Agarwal, Gustavson, Zubair - 1994 - A high-performance matrix-multiplication algorithm on a distributed-memory parallel computer, using overlapped communication.pdf:pdf},
issn = {0018-8646},
journal = {IBM Journal of Research and Development},
keywords = {printed},
mendeley-tags = {printed},
month = nov,
number = {6},
pages = {673--681},
title = {A High-Performance Matrix-Multiplication Algorithm on a Distributed-Memory Parallel Computer, Using Overlapped Communication},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5389494},
volume = {38},
year = {1994}
}
@article{Aggarwal1990,
abstract = {We propose a model, LPRAM, for parallel random access machines with local memory that captures both the communication and computational requirements in parallel computation. For this model, we present several interesting results, including the following: Two n n matrices can be multiplied in 0( n 3 p computation time and communication steps using p processors (for ). Furthermore, these bounds are optimal for arithmetic on semirings (using only). It is shown that any algorithm that uses comparisons only and that sorts n words requires $\Omega$( n log n p log( n p communication steps for 1 < p < n . We also provide an algorithm that sorts n words and uses (-)( n log n p computation time and (-)( n log n p log( n p communication steps. These bounds also apply for computing an n -point FFT graph. It is shown that computing any binary tree $\tau$ with n nodes and height h requires $\Omega$( n p + log n + h communication steps, and can always be computed in 0( n p + min( n , h steps. We also present a simple linear-time algorithm that generates a schedule for computing $\tau$ in at most 2 D opt ($\tau$) steps, where D opt ($\tau$) represents the minimum communication delay for computing $\tau$. It is also shown that various problems that are expressed as DAGs exhibit a communication-delay/computation-time trade-off.},
author = {Aggarwal, Alok and Chandra, Ashok K. and Snir, Marc},
doi = {10.1016/0304-3975(90)90188-N},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Aggarwal, Chandra, Snir - 1990 - Communication complexity of PRAMs.pdf:pdf;:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Aggarwal, Chandra, Snir - 1990 - Communication complexity of PRAMs(2).pdf:pdf},
issn = {0304-3975},
journal = {Theoretical Computer Science},
number = {1},
pages = {3--28},
publisher = {Elsevier},
title = {Communication Complexity of {PRAMs}},
url = {http://linkinghub.elsevier.com/retrieve/pii/030439759090188N},
volume = {71},
year = {1990}
}
@article{Aggarwal2001,
abstract = {The outlier detection problem has important applications in the field of fraud detection, network robustness analysis, and intrusion detection. Most such applications are high dimensional domains in which the data can contain hundreds of dimensions. Many recent algorithms use concepts of proximity in order to find outliers based on their relationship to the rest of the data. However, in high dimensional space, the data is sparse and the notion of proximity fails to retain its meaningfulness. In ...},
annote = {Andreas is god},
author = {Aggarwal, Charu C. and Yu, Philip S.},
doi = {10.1145/376284.375668},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Aggarwal, Yu - 2001 - Outlier detection for high dimensional data.pdf:pdf},
isbn = {1581133324},
issn = {0163-5808},
journal = {ACM SIGMOD Record},
number = {2},
pages = {37--46},
publisher = {ACM},
title = {Outlier Detection for High Dimensional Data},
url = {http://portal.acm.org/citation.cfm?doid=376284.375668},
volume = {30},
year = {2001}
}
@incollection{Ajwani2009,
abstract = {We consider the problem of Breadth First Search (BFS) traversal on massive sparse undi- rected graphs. Despite the existence of simple linear time algorithms in the RAM model, it was considered non-viable for massive graphs because of the I/O cost it incurs. Munagala and Ranade [29] and later Mehlhorn and Meyer [27] gave eﬃcient algorithms (refered to as MR BFS and MM BFS, respectively) for computing BFS level decompositions in an external memory model. Ajwani et al. [3] implemented MR BFS and the randomized variant of MM BFS using the external memory library STXXL and gave a comparative study of the two algorithms on vari- ous graph classes. In this paper, we review and extend that result demonstrating the eﬀectiveness and viability of the BFS implementations on various other synthetic and real world benchmarks. Furthermore, we present the implementation of the deterministic variant of MM BFS and show that in most cases, it outperforms the randomized variant.},
author = {Ajwani, Deepak and Dementiev, Roman and Osipov, Vitaly and Meyer, Ulrich},
booktitle = {The Shortest Path Problem: Ninth {DIMACS} Implementation Challenge},
editor = {Demetrescu, Camil and Goldberg, Andrew V. and Johnson, David S.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ajwani et al. - 2009 - Breadth first search on massive graphs.pdf:pdf},
isbn = {9780821843833},
pages = {291--307},
publisher = {AMS},
series = {DIMACS Series in Discrete Mathematics and Theoretical Computer Science},
title = {Breadth First Search on Massive Graphs},
volume = {74},
year = {2009}
}
@unpublished{Albers,
abstract = {The mini-course on competitive online algorithms consisted of three lectures. In the rst lecture we gave basic de nitions and presented important techniques that are used in the study on online algorithms. The paging problem was always the running example to explain and illustrate the material. We also discussed the k-server problem, which is a very well-studied generalization of the paging problem. The second lecture was concerned with self-organizing data structures, in particular self-orga- nizing linear lists. We presented results on deterministic and randomized online algorithms. Furthermore, we showed that linear lists can be used to build very e ective data compression schemes and reported on theoretical as well as experimental results. In the third lecture we discussed three application areas in which interesting online problems arise. The areas were (1) distributed data management, (2) scheduling and load balancing, and (3) robot navigation and exploration. In each of these elds we gave some important results.},
address = {Saarbr{\"u}cken},
author = {Albers, Susanne},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Albers - 1996 - Competitive Online Algorithms.pdf:pdf},
institution = {MPI},
internal-note = {CiteSeerX handle 10.1.1.107.8676 (was stored in the doi field; not a DOI)},
keywords = {Lecture Notes},
note = {Lecture notes of a mini-course on competitive online algorithms, MPI Saarbr{\"u}cken},
pages = {56},
title = {Competitive Online Algorithms},
year = {1996}
}
@article{Albers1999,
abstract = {Over the past twelve years, online algorithms have received considerable research interest. Online problems had been investigated already in the seventies and early eighties but an extensive, systematic study started only when Sleator and Tarjan 41 suggested comparing an online algorithm to an optimal offline algorithm and Karlin, Manasse, Rudolph and Sleator 29 coined the term competitive analysis.},
author = {Albers, Susanne and Leonardi, Stefano},
doi = {10.1145/333580.333583},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Albers - 1996 - Competitive Online Algorithms.pdf:pdf},
issn = {0360-0300},
journal = {ACM Computing Surveys},
number = {3es},
pages = {4--es},
publisher = {ACM},
title = {Competitive Online Algorithms},
url = {http://portal.acm.org/citation.cfm?doid=333580.333583},
volume = {31},
year = {1999}
}
@article{Alon2002,
abstract = {It is shown that for every 1≤ s ≤ n , the probability that the s -th largest eigenvalue of a random symmetric n -by- n matrix with independent random entries of absolute value at most 1 deviates from its median by more than t is at most 4 e − t 2 32 s 2 . The main ingredient in the proof is Talagrand’s Inequality for concentration of measure in product spaces.},
archivePrefix = {arXiv},
arxivId = {math-ph/0009032v1},
author = {Alon, Noga and Krivelevich, Michael and Vu, Van H.},
doi = {10.1007/BF02785860},
eprint = {math-ph/0009032v1},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Alon, Krivelevich, Vu - 2002 - On the concentration of eigenvalues of random symmetric matrices.pdf:pdf},
issn = {0021-2172},
journal = {Israel Journal of Mathematics},
keywords = {Mathematics and Statistics},
month = dec,
number = {1},
pages = {259--267},
primaryClass = {math-ph},
publisher = {Hebrew University Magnes Press},
title = {On the Concentration of Eigenvalues of Random Symmetric Matrices},
url = {http://www.springerlink.com/content/w4w35ug0r5373164/},
volume = {131},
year = {2002}
}
@inproceedings{Alonso2010,
abstract = {Two strategies of distribution of computations can be used to implement parallel solvers for dense linear algebra problems for Heterogeneous Computational Clusters of Multicore Processors (HCoMs). These strategies are called Heterogeneous Process Distribution Strategy (HPS) and Heterogeneous Data Distribution Strategy (HDS). They are not novel and have been researched thoroughly. However, the advent of multicores necessitates enhancements to them. In this paper, we present these enhancements. Our study is based on experiments using six applications to perform Parallel Matrix-matrix Multiplication (PMM) on an HCoM employing the two distribution strategies.},
author = {Alonso, Pedro and Reddy, Ravi and Lastovetsky, Alexey},
booktitle = {18th Euromicro International Conference on Parallel, Distributed and Network-Based Processing (PDP 2010)},
doi = {10.1109/PDP.2010.52},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Alonso, Reddy, Lastovetsky - 2010 - Experimental Study of Six Different Implementations of Parallel Matrix Multiplication on Heterogeneous Computational Clusters of Multicore Processors.pdf:pdf},
isbn = {9781424456727},
issn = {1066-6192},
keywords = {printed},
mendeley-tags = {printed},
pages = {263--270},
publisher = {IEEE},
title = {Experimental Study of Six Different Implementations of Parallel Matrix Multiplication on Heterogeneous Computational Clusters of Multicore Processors},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5452460},
year = {2010}
}
@article{Alpern1994,
abstract = {Memory Hierarchy (UMH) model introduced in this paper captures performance-relevant aspects of the hierarchical nature of computer memory. It is used to quantify architectural requirements of several algorithms and to ratify the faster speeds achieved by tuned implementations that use improved data-movement strategies. A sequential computer's memory is modeled as a sequence M 0,M 1,... of increasingly large memory modules. Computation takes place inM 0. Thus,M 0 might model a computer's central processor, whileM 1 might be cache memory,M 2 main memory, and so on. For each moduleM u, a busB u connects it with the next larger module Mu+1. All buses may be active simultaneously. Data is transferred along a bus in fixed-sized blocks. The size of these blocks, the time required to transfer a block, and the number of blocks that fit in a module are larger for modules farther from the processor. The UMH model is parametrized by the rate at which the blocksizes increase and by the ratio of the blockcount to the blocksize. A third parameter, the transfer-cost (inverse bandwidth) function, determines the time to transfer blocks at the different levels of the hierarchy. UMH analysis refines traditional methods of algorithm analysis by including the cost of data movement throughout the memory hierarchy. Thecommunication efficiency of a program is a ratio measuring the portion of UMH running time during which M0 is active. An algorithm that can be implemented by a program whose communication efficiency is nonzero in the limit is said to becommunication- efficient. The communication efficiency of a program depends on the parameters of the UMH model, most importantly on the transfer-cost function. Athreshold function separates those transfer-cost functions for which an algorithm is communication-efficient from those that are too costly. 
Threshold functions for matrix transpose, standard matrix multiplication, and Fast Fourier Transform algorithms are established by exhibiting communication-efficient programs at the threshold and showing that more expensive transfer-cost functions are too costly. A parallel computer can be modeled as a tree of memory modules with computation occurring at the leaves. Threshold functions are established for multiplication ofNN matrices using up to N2 processors in a tree with constant branching factor.},
author = {Alpern, Bowen and Carter, Larry and Feig, Ephraim and Selker, Ted},
doi = {10.1007/BF01185206},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Alpern et al. - 1994 - The uniform memory hierarchy model of computation.pdf:pdf},
issn = {0178-4617},
journal = {Algorithmica},
keywords = {printed},
mendeley-tags = {printed},
number = {2-3},
pages = {72--109},
title = {The Uniform Memory Hierarchy Model of Computation},
url = {http://www.springerlink.com/index/10.1007/BF01185206},
volume = {12},
year = {1994}
}
@inproceedings{Aluru,
abstract = {In modern clustering environments where the memory hierarchy has many layers (distributed memory, shared memory layer, cache, ...), an important question is how to fully utilize all available resources and identify the most dominant layer in certain computation. When combining algorithms on all layers together, what would be the best method to get the best performance out of all the resources we have? The mixed mode programming model that uses thread programming on the shared memory layer and message passing programming on the distributed memory layer is a method that many researchers are using to utilize the memory resources. We take an algorithmic approach that uses matrix multiplication as a tool to show how cache algorithms affect the performance of both shared memory and distributed memory algorithms. We show that with good underlying cache algorithm, overall performance is stable. When the underlying cache algorithm is bad, superlinear speedup may occur and increasing number of threads may also improve performance.},
author = {Aluru, Srinivas},
booktitle = {Proceedings of the IEEE International Conference on Cluster Computing (CLUSTER 2002)},
doi = {10.1109/CLUSTR.2002.1137747},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Aluru - 2002 - Mixed mode matrix multiplication.pdf:pdf},
isbn = {0-7695-1745-5},
pages = {195--203},
publisher = {IEEE Computer Society},
title = {Mixed Mode Matrix Multiplication},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1137747},
year = {2002}
}
@article{Badr2008,
author = {Badr, Amr and Aref, Ibtehal M. and Hussien, Basma M. and Yosr, Eman},
journal = {Journal of Computer Science},
keywords = {Algorithms,elitism based compact genetic algorithm,elitist compact genetic algorithm,estimation distribution algorithm,non persistent elitist compact genetic,persistent},
number = {7},
pages = {1125--1129},
publisher = {Citeseer},
title = {Solving Protein Folding Problem Using Elitism-Based Compact Genetic Algorithm},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.165.8121\&rep=rep1\&type=pdf},
volume = {4},
year = {2008}
}
@article{Andreopoulos2009,
abstract = {Clustering is ubiquitously applied in bioinformatics with hierarchical clustering and k-means partitioning being the most popular methods. Numerous improvements of these two clustering methods have been introduced, as well as completely different approaches such as grid-based, density-based and model-based clustering. For improved bioinformatics analysis of data, it is important to match clusterings to the requirements of a biomedical application. In this article, we present a set of desirable clustering features that are used as evaluation criteria for clustering algorithms. We review 40 different clustering algorithms of all approaches and datatypes. We compare algorithms on the basis of desirable clustering features, and outline algorithms' benefits and drawbacks as a basis for matching them to biomedical applications.},
author = {Andreopoulos, Bill and An, Aijun and Wang, Xiaogang and Schroeder, Michael},
doi = {10.1093/bib/bbn058},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Andreopoulos et al. - 2009 - A roadmap of clustering algorithms finding a match for a biomedical application.pdf:pdf},
issn = {1477-4054},
journal = {Briefings in Bioinformatics},
keywords = {Algorithms,Cluster Analysis,Computational Biology,Gene Expression Profiling,Gene Expression Profiling: methods,Information Storage and Retrieval,Models,Protein Interaction Mapping,Statistical,printed},
mendeley-tags = {printed},
month = may,
number = {3},
pages = {297--314},
pmid = {19240124},
title = {A Roadmap of Clustering Algorithms: Finding a Match for a Biomedical Application},
url = {http://www.ncbi.nlm.nih.gov/pubmed/19240124},
volume = {10},
year = {2009}
}
@incollection{Angiulli,
abstract = {A novel algorithm, named DESCRY, for clustering very large multidimensional data sets with numerical attributes is presented. DESCRY discovers clusters having different shape, size, and density and when data contains noise by first finding and clustering a small set of points, called meta-points, that well depict the shape of clusters present in the data set. Final clusters are obtained by assigning each point to one of the partial clusters. The computational complexity of DESCRY is linear both in the data set size and in the data set dimensionality. Experiments show the very good qualitative results obtained comparable with those obtained by state of the art clustering algorithms.},
address = {Berlin, Heidelberg},
author = {Angiulli, Fabrizio and Pizzuti, Clara and Ruffolo, Massimo},
booktitle = {Intelligent Data Engineering and Automated Learning -- IDEAL 2004},
doi = {10.1007/b99975},
editor = {Yang, Zheng Rong and Yin, Hujun and Everson, Richard M.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Angiulli, Pizzuti, Ruffolo - 2004 - DESCRY A density based clustering algorithm for very large data sets.pdf:pdf},
isbn = {978-3-540-22881-3},
pages = {203--210},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{DESCRY}: A Density Based Clustering Algorithm for Very Large Data Sets},
url = {http://www.springerlink.com/index/10.1007/b99975},
volume = {3177},
year = {2004}
}
@article{Arge2007,
author = {Arge, Lars and Bender, Michael A. and Demaine, Erik D. and Holland-Minkley, Bryan and Munro, J. Ian},
doi = {10.1137/S0097539703428324},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Arge et al. - 2007 - An Optimal Cache-Oblivious Priority Queue and Its Application to Graph Algorithms.pdf:pdf},
issn = {0097-5397},
journal = {SIAM Journal on Computing},
keywords = {printed},
mendeley-tags = {printed},
number = {6},
pages = {1672--1695},
title = {An Optimal Cache-Oblivious Priority Queue and Its Application to Graph Algorithms},
url = {http://link.aip.org/link/SMJCAT/v36/i6/p1672/s1\&Agg=doi},
volume = {36},
year = {2007}
}
@article{Arifuzzaman2006,
abstract = {Protein-protein interactions play key roles in protein function and the structural organization of a cell. A thorough description of these interactions should facilitate elucidation of cellular activities, targeted-drug design, and whole cell engineering. A large-scale comprehensive pull-down assay was performed using a His-tagged Escherichia coli ORF clone library. Of 4339 bait proteins tested, partners were found for 2667, including 779 of unknown function. Proteins copurifying with hexahistidine-tagged baits on a Ni2+-NTA column were identified by MALDI-TOF MS (matrix-assisted laser desorption ionization time of flight mass spectrometry). An extended analysis of these interacting networks by bioinformatics and experimentation should provide new insights and novel strategies for E. coli systems biology.},
author = {Arifuzzaman, Mohammad and Maeda, Maki and Itoh, Aya and Nishikata, Kensaku and Takita, Chiharu and Saito, Rintaro and Ara, Takeshi and Nakahigashi, Kenji and Huang, Hsuan-Cheng and Hirai, Aki and Tsuzuki, Kohei and Nakamura, Seira and Altaf-Ul-Amin, Mohammad and Oshima, Taku and Baba, Tomoya and Yamamoto, Natsuko and Kawamura, Tomoyo and Ioka-Nakamichi, Tomoko and Kitagawa, Masanari and Tomita, Masaru and Kanaya, Shigehiko and Wada, Chieko and Mori, Hirotada},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Arifuzzaman et al. - 2006 - Large-scale identification of protein-protein interaction of Escherichia coli K-12.pdf:pdf},
institution = {Graduate School of Biological Sciences, Nara Institute of Science and Technology, Ikoma, Nara 630-0101, Japan.},
journal = {Genome Research},
keywords = {biological,escherichia coli k12,escherichia coli k12 chemistry,escherichia coli proteins,escherichia coli proteins chemistry,escherichia coli proteins metabolism,gene library,histidine,histidine chemistry,mass,matrix assisted laser desorpti,models,open reading frames,proteome,proteome analysis,proteomics,spectrometry},
number = {5},
pages = {686--691},
publisher = {Cold Spring Harbor Laboratory Press},
title = {Large-Scale Identification of Protein-Protein Interaction of {Escherichia coli K-12}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=1457052\&tool=pmcentrez\&rendertype=abstract},
volume = {16},
year = {2006}
}
@article{Armah2007,
abstract = {"Protein folding is defined as a process by which a polypeptide chain performs a search in conformational space with the objective of achieving the so-called native conformation to global free-energy minimum under a given set of physiochemical conditions of the medium." Misfolding then, is the process by which this objective is not achieved. Protein Folding Quality Assessment (PFQA), is characterized by a three-parameter distribution function Phi(T) referred to as the PFQA function. It uses results of protein folding processes to assess the output quality of protein folding. Protein misfolding is implicated in the initial cause of many conformational diseases. Folding of cytosolic protein can be regarded as the performance of the protein after it is produced or manufactured by the synthesis processes. Protein folding through different mechanisms and pathways has been extensively covered in J.D. Bryngelson, P.G. Wolynes, Spin glass and statistical mechanics of protein folding, Proc. Natl. Acad. Sci. USA 84 (1987) 7524; J. Wang, Statistics, pathways and dynamics of single molecule folding, J. Chem. Phys. 118 (2) (2003) 953; N.D. Socci, J.N. Onuchic, P.G. Wolynes, Diffusive dynamics of the reaction coordinates for protein folding funnels, J. Chem. Phys. 104 (14) (1996); D. Thirumalai, From minimal models to real proteins, time scales for protein folding kinetics, J. Phys. I France 5 (1995) 1457. The model is based on growth models of Ratkowsky, Richards, etc. D.A. Ratkowski, T.J. Reeds, Choosing near-linear parameters logistic model for radio-ligand and related assays, Biometrics 42 (1986) 575 for a three-parameters model to handle the quality assessment of the folding process. Thus a complete distribution can be found, thanks to the scale, location and shape parameters.},
author = {Armah, Ebenezer O.},
institution = {University of Illinois at Chicago, Department of Mathematics, Statistics and Computer Science, SSR 1409B, 809 South Damen, Chicago, IL 60612, USA. earmah1@uic.edu},
journal = {Mathematical Biosciences},
number = {1},
pages = {1--25},
pmid = {17157330},
title = {Mathematics of Protein Pathological Misfolding},
url = {http://www.ncbi.nlm.nih.gov/pubmed/17157330},
volume = {208},
year = {2007}
}
@incollection{Aspnas2007,
abstract = {In certain applications the non-zero elements of large sparse matrices are formed by adding several smaller contributions in random order before the final values of the elements are known. For some sparse matrix representations this procedure is laborious. We present an efficient method for assembling large irregular sparse matrices where the non-zero elements have to be assembled by adding together contributions and updating the individual elements in random order. A sparse matrix is stored in a hash table, which allows an efficient method to search for an element. Measurements show that for a sparse matrix with random elements the hash-based representation performs almost 7 times faster than the compressed row format (CRS) used in the PETSc library. Once the sparse matrix has been assembled we transfer the matrix to e.g. CRS for matrix manipulations.},
address = {Berlin, Heidelberg},
annote = {Deals with dynamically updated matrices. },
author = {Aspn{\"a}s, Mats and Signell, Artur and Westerholm, Jan},
booktitle = {Applied Parallel Computing: State of the Art in Scientific Computing (PARA 2006)},
doi = {10.1007/978-3-540-75755-9},
editor = {K{\aa}gstr{\"o}m, Bo and Elmroth, Erik and Dongarra, Jack and Wa{\'s}niewski, Jerzy},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Aspn\"{a}s et al. - 2007 - Efficient Assembly of Sparse Matrices Using Hashing.pdf:pdf},
isbn = {978-3-540-75754-2},
keywords = {Computer Science,printed},
mendeley-tags = {printed},
pages = {900--907},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {Efficient Assembly of Sparse Matrices Using Hashing},
url = {http://www.springerlink.com/content/m841g5212j685r05/},
volume = {4699},
year = {2007}
}
@article{Asur2007,
abstract = {Protein-Protein Interaction (PPI) networks are believed to be important sources of information related to biological processes and complex metabolic functions of the cell. The presence of biologically relevant functional modules in these networks has been theorized by many researchers. However, the application of traditional clustering algorithms for extracting these modules has not been successful, largely due to the presence of noisy false positive interactions as well as specific topological challenges in the network.},
author = {Asur, Sitaram and Ucar, Duygu and Parthasarathy, Srinivasan},
doi = {10.1093/bioinformatics/btm212},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Asur, Ucar, Parthasarathy - 2007 - An ensemble framework for clustering protein-protein interaction networks.pdf:pdf},
issn = {1367-4811},
journal = {Bioinformatics},
keywords = {Biological,Cluster Analysis,Computer Simulation,Models,Multigene Family,Multigene Family: physiology,Principal Component Analysis,Protein Interaction Mapping,Protein Interaction Mapping: methods,Proteome,Proteome: metabolism,Signal Transduction,Signal Transduction: physiology,printed},
mendeley-tags = {printed},
month = jul,
number = {13},
pages = {i29--i40},
pmid = {17646309},
title = {An Ensemble Framework for Clustering Protein-Protein Interaction Networks},
url = {http://www.ncbi.nlm.nih.gov/pubmed/17646309},
volume = {23},
year = {2007}
}
@book{Bader2011,
abstract = {Graphs are among the most important abstract data types in computer science, and the algorithms that operate on them are critical to modern life. Graphs have been shown to be powerful tools for modeling complex problems because of their simplicity and generality. Graph algorithms are one of the pillars of mathematics, informing research in such diverse areas as combinatorial optimization, complexity theory, and topology. Algorithms on graphs are applied in many ways in today s world - from Web rankings to metabolic networks, from finite element meshes to semantic graphs. The current exponential growth in graph data has forced a shift to parallel computing for executing graph algorithms. Implementing parallel graph algorithms and achieving good parallel performance have proven difficult. This book addresses these challenges by exploiting the well-known duality between a canonical representation of graphs as abstract collections of vertices and edges and a sparse adjacency matrix representation. This linear algebraic approach is widely accessible to scientists and engineers who may not be formally trained in computer science. The authors show how to leverage existing parallel matrix computation techniques and the large amount of software infrastructure that exists for these computations to implement efficient and scalable parallel graph algorithms. The benefits of this approach are reduced algorithmic complexity, ease of implementation, and improved performance. Graph Algorithms in the Language of Linear Algebra is the first book to cover graph algorithms accessible to engineers and scientists not trained in computer science but having a strong linear algebra background, enabling them to quickly understand and apply graph algorithms. 
It also covers array-based graph algorithms, showing readers how to express canonical graph algorithms using a highly elegant and efficient array notation and how to tap into the large range of tools and techniques that have been built for matrices and tensors; parallel array-based algorithms, demonstrating with examples how to easily implement parallel graph algorithms using array-based approaches, which enables readers to address much larger graph problems; and array-based theory for analyzing graphs, providing a template for using array-based constructs to develop new theoretical approaches for graph analysis.},
doi = {10.1137/1.9780898719918},
editor = {Kepner, Jeremy and Gilbert, John R.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bader et al. - 2011 - Graph Algorithms in the Language of Linear Algebra.pdf:pdf},
internal-note = {Edited volume; chapter contributors were wrongly exported into the author field and have been removed in favour of the editor field},
isbn = {9780898719901},
pages = {389},
publisher = {SIAM},
title = {Graph Algorithms in the Language of Linear Algebra},
year = {2011}
}
@inproceedings{Bader2008,
abstract = {We will present hardware-oriented implementations of block-recursive approaches for matrix operations, esp. matrix multiplication and LU decomposition. An element order based on a recursively constructed Peano space-filling curve is used to store the matrix elements. This block-recursive numbering scheme is changed into a standard row-major order, as soon as the respective matrix subblocks fit into level-1 cache. For operations on these small blocks, we implemented hardware-oriented kernels optimised for Intel's Core architecture. The resulting matrix-multiplication and LU-decomposition codes compete well with optimised libraries such as Intel's MKL, ATLAS, or GotoBLAS, but have the advantage that only comparably small and well-defined kernel operations have to be optimised to achieve high performance.},
address = {Berlin, Heidelberg},
author = {Bader, Michael and Franz, Robert and G{\"u}nther, Stephan and Heinecke, Alexander},
booktitle = {Parallel Processing and Applied Mathematics},
doi = {10.1007/978-3-540-68111-3_66},
editor = {Wyrzykowski, Roman and Dongarra, Jack and Karczewski, Konrad and Wasniewski, Jerzy},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bader et al. - 2008 - Hardware-Oriented Implementation of Cache Oblivious Matrix Operations Based on Space-Filling Curves.pdf:pdf},
isbn = {978-3-540-68105-2},
keywords = {Computer Science,printed},
mendeley-tags = {printed},
pages = {628--638},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Hardware-Oriented Implementation of Cache Oblivious Matrix Operations Based on Space-Filling Curves}},
url = {http://www.springerlink.com/content/l63414747k616103/},
volume = {4967},
year = {2008}
}
@article{Bader2006a,
  annote        = {Interesting to gain spatial and temporal locality by laying out the data in specific order in memory. 
However is not "processor - oblivious"
      },
  author        = {Bader, Michael and Mayer, Christian},
  file          = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bader, Mayer - 2006 - Cache oblivious matrix operations using Peano curves.pdf:pdf},
  isbn          = {3-540-75754-6, 978-3-540-75754-2},
  keywords      = {printed},
  mendeley-tags = {printed},
  month         = jun,
  pages         = {521--530},
  title         = {{Cache oblivious matrix operations using Peano curves}},
  url           = {http://dl.acm.org/citation.cfm?id=1775059.1775135},
  year          = {2006}
}
@inproceedings{Bader2009,
abstract = {The ratio between processor and main memory performance has been increasing since quite some time, and can safely be expected to do so throughout the oncoming years. In the era of single-core processors, this was mainly observable by increased latency, for example when measured in number of (possibly stalled) CPU clock cycles. Nowadays, with multicore chips, multiple cores share the same connection to off-chip main memory, which effectively reduces available bandwidth, as well. Caches help in both cases: they provide both a much lower latency and a much higher bandwidth by being located on-chip. By holding copies of least recently used memory blocks, caches exploit the fact that programs on the average access memory in ways that often access the same memory cell (temporal locality), or nearby memory cells (spatial locality). However, this natural locality is not enough for scientific computing in HPC. Further improving any existing access locality of given algorithms is very much wanted. In this talk, we present strategies to improve the locality of memory accesses for linear algebra problems occurring in different kinds of applications: (1) an algorithmic approach based on Peano spacefilling curves that leads to inherently cache efficient (cache oblivious) matrix algorithms, such as matrix multiplication or LU decomposition for dense and sparse matrices - on single-core CPUs, as well as in the context of shared-memory multicore platforms. (2) cache optimization strategies for matrix-vector multiplications with very large, sparse matrices, as they occur in the iterative MLEM algorithm, which is used for image reconstruction in nuclear medicine. Here, different cache-aware optimization strategies are combined in order to better exploit large caches, small caches, and single cache lines.},
author = {Bader, Michael and Weidendorfer, Josef},
booktitle = {2009 International Conference on High Performance Computing \& Simulation},
doi = {10.1109/HPCSIM.2009.5192891},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bader, Weidendorfer - 2009 - Exploiting memory hierarchies in scientific computing.pdf:pdf},
isbn = {978-1-4244-4906-4},
keywords = {printed},
mendeley-tags = {printed},
month = jun,
pages = {33--35},
publisher = {IEEE},
title = {{Exploiting memory hierarchies in scientific computing}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5192891},
year = {2009}
}
@inproceedings{Bader2006,
address = {Berlin, Heidelberg},
author = {Bader, Michael and Zenger, Christoph},
booktitle = {Parallel Processing and Applied Mathematics},
doi = {10.1007/11752578},
editor = {Wyrzykowski, Roman and Dongarra, Jack and Meyer, Norbert and Wa{\'s}niewski, Jerzy},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bader, Zenger - 2006 - A Cache Oblivious Algorithm for Matrix Multiplication Based on Peano’s Space Filling Curve.pdf:pdf},
internal-note = {Imported abstract removed: it described an unrelated paper on cache-based attacks against AES, not this one. DOI appears to be the volume-level DOI; chapter DOI not verified.},
isbn = {978-3-540-34141-3},
keywords = {not printed},
mendeley-tags = {not printed},
pages = {1042--1049},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{A Cache Oblivious Algorithm for Matrix Multiplication Based on Peano's Space Filling Curve}},
url = {http://www.springerlink.com/index/10.1007/11752578},
volume = {3911},
year = {2006}
}
@book{Bal2007,
abstract = {Java for Bioinformatics and Biomedical Applications describes the work of the U.S. National Cancer Institute (NCI, National Institutes of Health, U.S. Department of Health and Human Services) and a large number of cancer centers across the U.S. under the caBIG{\texttrademark} (cancer Biomedical Informatics Grid) program, as well as standard bioinformatics applications. The goal of NCI caBIG{\texttrademark} is to create a standards based, interoperable network of individuals, applications and data to enhance the pace of cancer research. CaBIG{\texttrademark} uses J2EE and open source standards for all software development work. This book examines the tools and technologies being developed under caBIG{\texttrademark} to meet the goal of eliminating suffering and death from cancer by 2015 as formulated by the former NCI Director, Dr. Andrew von Eschenbach. In doing so, it provides a vignette into the efforts of thousands of people -- molecular biologists, medical practitioners, software developers, to name a few -- across the country to bring the promise of translational research to individuals with cancer. From a software perspective, a functional approach is used to teach the Java platform and its features for enterprise-level application development. Under this approach, the various syntactical and operative elements of the language and any software libraries that have been used (for example, BioJava, Apache, etc.) are taught not in isolation but in the context of discrete definable research problems that enable the user to relate how the different parts of the language fit together in the big picture. All examples are derived from practical problems faced in biomedical/clinical data retrieval and analysis during routine bioinformatics and cancer research. 
Further, the book illustrates how individual bioinformatics applications (such as BLAST and Genscan) can be stitched together into a pipeline so that users can direct the output of one tool (for example, gene predictions using Genscan) to perform further analysis (say, homology searching using BLAST).},
author = {Bal, Harshawardhan and Hujol, Johnny},
edition = {First},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bal, Hujol - 2007 - Java for Bioinformatics and Biomedical Applications.pdf:pdf},
isbn = {9781441942456},
pages = {360},
publisher = {Springer},
title = {{Java for Bioinformatics and Biomedical Applications}},
year = {2007}
}
@misc{Ballard2012a,
abstract = {A parallel algorithm has perfect strong scaling if its running time on P processors is linear in 1/P, including all communication costs. Distributed-memory parallel algorithms for matrix multiplication with perfect strong scaling have only recently been found. One is based on classical matrix multiplication (Solomonik and Demmel, 2011), and one is based on Strassen's fast matrix multiplication (Ballard, Demmel, Holtz, Lipshitz, and Schwartz, 2012). Both algorithms scale perfectly, but only up to some number of processors where the inter-processor communication no longer scales. We obtain a memory-independent communication cost lower bound on classical and Strassen-based distributed-memory matrix multiplication algorithms. These bounds imply that no classical or Strassen-based parallel matrix multiplication algorithm can strongly scale perfectly beyond the ranges already attained by the two parallel algorithms mentioned above. The memory-independent bounds and the strong scaling bounds generalize to other algorithms.},
archivePrefix = {arXiv},
arxivId = {1202.3177},
author = {Ballard, Grey and Demmel, James and Holtz, Olga and Lipshitz, Benjamin and Schwartz, Oded},
eprint = {1202.3177},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ballard et al. - 2012 - Strong Scaling of Matrix Multiplication Algorithms and Memory-Independent Communication Lower Bounds.pdf:pdf},
keywords = {printed},
mendeley-tags = {printed},
month = feb,
title = {{Strong Scaling of Matrix Multiplication Algorithms and Memory-Independent Communication Lower Bounds}},
url = {http://arxiv.org/abs/1202.3177},
year = {2012}
}
@misc{Ballard2012,
abstract = {Parallel matrix multiplication is one of the most studied fundamental problems in distributed and high performance computing. We obtain a new parallel algorithm that is based on Strassen's fast matrix multiplication and minimizes communication. The algorithm outperforms all known parallel matrix multiplication algorithms, classical and Strassen-based, both asymptotically and in practice. A critical bottleneck in parallelizing Strassen's algorithm is the communication between the processors. Ballard, Demmel, Holtz, and Schwartz (SPAA'11) prove lower bounds on these communication costs, using expansion properties of the underlying computation graph. Our algorithm matches these lower bounds, and so is communication-optimal. It exhibits perfect strong scaling within the maximum possible range. Benchmarking our implementation on a Cray XT4, we obtain speedups over classical and Strassen-based algorithms ranging from 24\% to 184\% for a fixed matrix dimension n=94080, where the number of nodes ranges from 49 to 7203. Our parallelization approach generalizes to other fast matrix multiplication algorithms.},
archivePrefix = {arXiv},
arxivId = {1202.3173},
author = {Ballard, Grey and Demmel, James and Holtz, Olga and Lipshitz, Benjamin and Schwartz, Oded},
eprint = {1202.3173},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ballard et al. - 2012 - Communication-Optimal Parallel Algorithm for Strassen's Matrix Multiplication.pdf:pdf},
keywords = {printed},
mendeley-tags = {printed},
month = feb,
title = {{Communication-Optimal Parallel Algorithm for Strassen's Matrix Multiplication}},
url = {http://arxiv.org/abs/1202.3173},
year = {2012}
}
@inproceedings{Ballard2011,
abstract = {The communication cost of algorithms (also known as I/O-complexity) is shown to be closely related to the expansion properties of the corresponding computation graphs. We demonstrate this on Strassen's and other fast matrix multiplication algorithms, and obtain first lower bounds on their communication costs. In the sequential case, where the processor has a fast memory of size $M$, too small to store three $n$-by-$n$ matrices, the lower bound on the number of words moved between fast and slow memory is, for many of the matrix multiplication algorithms, $\Omega((\frac{n}{\sqrt{M}})^{\omega_0}\cdot M)$, where $\omega_0$ is the exponent in the arithmetic count (e.g., $\omega_0 = \lg 7$ for Strassen, and $\omega_0 = 3$ for conventional matrix multiplication). With $p$ parallel processors, each with fast memory of size $M$, the lower bound is $p$ times smaller. These bounds are attainable both for sequential and for parallel algorithms and hence optimal.},
address = {New York, New York, USA},
archivePrefix = {arXiv},
arxivId = {1109.1693},
author = {Ballard, Grey and Demmel, James and Holtz, Olga and Schwartz, Oded},
booktitle = {Proceedings of the 23rd ACM symposium on Parallelism in algorithms and architectures - SPAA '11},
doi = {10.1145/1989493.1989495},
eprint = {1109.1693},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ballard et al. - 2011 - Graph expansion and communication costs of fast matrix multiplication.pdf:pdf},
isbn = {9781450307437},
month = jun,
pages = {1--12},
publisher = {ACM Press},
title = {{Graph expansion and communication costs of fast matrix multiplication}},
url = {http://arxiv.org/abs/1109.1693},
year = {2011}
}
@article{Ballard2011a,
abstract = {In 1981 Hong and Kung proved a lower bound on the amount of communication (amount of data moved between a small, fast memory and large, slow memory) needed to perform dense, n-by-n matrix multiplication using the conventional $O(n^3)$ algorithm, where the input matrices were too large to fit in the small, fast memory. In 2004 Irony, Toledo, and Tiskin gave a new proof of this result and extended it to the parallel case (where communication means the amount of data moved between processors). In both cases the lower bound may be expressed as $\Omega$(\#arithmetic\_operations/$\sqrt{M}$), where M is the size of the fast memory (or local memory in the parallel case). Here we generalize these results to a much wider variety of algorithms, including LU factorization, Cholesky factorization, LDLT factorization, QR factorization, the Gram--Schmidt algorithm, and algorithms for eigenvalues and singular values, i.e., essentially all direct methods of linear algebra. The proof works for dense or sparse matrices and for sequential or parallel algorithms. In addition to lower bounds on the amount of data moved (bandwidth cost), we get lower bounds on the number of messages required to move it (latency cost). We extend our lower bound technique to compositions of linear algebra operations (like computing powers of a matrix) to decide whether it is enough to call a sequence of simpler optimal algorithms (like matrix multiplication) to minimize communication, or whether we can do better. We give examples of both. We also show how to extend our lower bounds to certain graph-theoretic problems. We point out recently designed algorithms that attain many of these lower bounds.},
author = {Ballard, Grey and Demmel, James and Holtz, Olga and Schwartz, Oded},
doi = {10.1137/090769156},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ballard et al. - 2011 - Minimizing communication in numerical linera algebra.pdf:pdf},
journal = {SIAM Journal on Matrix Analysis and Applications},
keywords = {printed},
mendeley-tags = {printed},
number = {3},
pages = {866--901},
title = {{Minimizing Communication in Numerical Linear Algebra}},
url = {http://epubs.siam.org/simax/resource/1/sjmael/v32/i3/p866\_s1?isAuthorized=no},
volume = {32},
year = {2011}
}
@article{Bapat2010,
abstract = {Multiple, dissimilar genetic defects in cancers of the same origin contribute to heterogeneity in tumor phenotypes and therapeutic responses of patients, yet the associated molecular mechanisms remain elusive. Here, we show at the systems level that serous ovarian carcinoma is marked by the activation of interconnected modules associated with a specific gene set that was derived from three independent tumor-specific gene expression data sets. Network prediction algorithms combined with preestablished protein interaction networks and known functionalities affirmed the importance of genes associated with ovarian cancer as predictive biomarkers, besides "discovering" novel ones purely on the basis of interconnectivity, whose precise involvement remains to be investigated. Copy number alterations and aberrant epigenetic regulation were identified and validated as significant influences on gene expression. More importantly, three functional modules centering on c-Myc activation, altered retinoblastoma signaling, and p53/cell cycle/DNA damage repair pathways have been identified for their involvement in transformation-associated events. Further studies will assign significance to and aid the design of a panel of specific markers predictive of individual- and tumor-specific pathways. In the parlance of this emerging field, such networks of gene-hub interactions may define personalized therapeutic decisions.},
author = {Bapat, Sharmila A and Krishnan, Anagha and Ghanate, Avinash D and Kusumbe, Anjali P and Kalra, Rajkumar S},
doi = {10.1158/0008-5472.CAN-10-0447},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bapat et al. - 2010 - Gene expression protein interaction systems network modeling identifies transformation-associated molecules and pathways in ovarian cancer.pdf:pdf},
issn = {1538-7445},
journal = {Cancer Research},
keywords = {Biological,Biological: genetics,Biological: metabolism,Cell Transformation,Cells,Chromatin Immunoprecipitation,Cultured,Cystadenocarcinoma,Epigenesis,Female,Fluorescence,Gene Expression Profiling,Gene Expression Regulation,Gene Regulatory Networks,Genetic,Humans,Immunoblotting,Immunoprecipitation,In Situ Hybridization,Neoplastic,Neoplastic: genetics,Oligonucleotide Array Sequence Analysis,Ovarian Neoplasms,Ovarian Neoplasms: genetics,Ovarian Neoplasms: metabolism,Ovarian Neoplasms: pathology,Prognosis,Protein Interaction Mapping,Serous,Serous: genetics,Serous: metabolism,Serous: pathology,Signal Transduction,Tumor Markers,printed},
mendeley-tags = {printed},
month = jun,
number = {12},
pages = {4809--4819},
pmid = {20530682},
title = {{Gene expression: protein interaction systems network modeling identifies transformation-associated molecules and pathways in ovarian cancer}},
url = {http://cancerres.aacrjournals.org/cgi/content/abstract/0008-5472.CAN-10-0447v1},
volume = {70},
year = {2010}
}
@inproceedings{Bar-Yossef2002,
abstract = {We present three algorithms to count the number of distinct elements in a data stream to within a factor of $1 \pm \epsilon$. Our algorithms improve upon known algorithms for this problem, and offer a spectrum of time/space tradeoffs.},
address = {Berlin, Heidelberg},
author = {Bar-Yossef, Ziv and Jayram, T. S. and Kumar, Ravi and Sivakumar, D. and Trevisan, Luca},
booktitle = {International Workshop on Randomization and Approximation Techniques},
doi = {10.1007/3-540-45726-7},
editor = {Rolim, Jos{\'e} D. P. and Vadhan, Salil},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bar-Yossef et al. - 2002 - Counting Distinct Elements in a Data Stream.pdf:pdf},
isbn = {978-3-540-44147-2},
month = aug,
pages = {1--10},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Counting Distinct Elements in a Data Stream}},
url = {http://www.springerlink.com/index/10.1007/3-540-45726-7},
volume = {2483},
year = {2002}
}
@book{Basu2008a,
abstract = {Since the initial work on constrained clustering, there have been numerous advances in methods, applications, and our understanding of the theoretical properties of constraints and constrained clustering algorithms. Bringing these developments together, Constrained Clustering: Advances in Algorithms, Theory, and Applications presents an extensive collection of the latest innovations in clustering data analysis methods that use background knowledge encoded as constraints. Algorithms The first five chapters of this volume investigate advances in the use of instance-level, pairwise constraints for partitional and hierarchical clustering. The book then explores other types of constraints for clustering, including cluster size balancing, minimum cluster size,and cluster-level relational constraints. Theory It also describes variations of the traditional clustering under constraints problem as well as approximation algorithms with helpful performance guarantees. Applications The book ends by applying clustering with constraints to relational data, privacy-preserving data publishing, and video surveillance data. It discusses an interactive visual clustering approach, a distance metric learning approach, existential constraints, and automatically generated constraints. With contributions from industrial researchers and leading academic experts who pioneered the field, this volume delivers thorough coverage of the capabilities and limitations of constrained clustering methods as well as introduces new types of constraints and clustering algorithms.},
edition = {First},
editor = {Basu, Sugato and Davidson, Ian and Wagstaff, Kiri},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Basu, Davidson, Wagstaff - 2008 - Constrained Clustering Advances in Algorithms, Theory, and Applications, 1 edition.pdf:pdf},
internal-note = {Removed imported journal/doi/issn/volume/number/pages: they referenced an unrelated Chinese Science Bulletin article. This is an edited book (Chapman \& Hall/CRC).},
isbn = {9781584889960},
publisher = {Chapman \& Hall/CRC},
title = {{Constrained Clustering: Advances in Algorithms, Theory, and Applications}},
url = {http://www.amazon.com/dp/1584889969},
year = {2008}
}
@article{Beaumont2001,
abstract = {We address the issue of implementing matrix multiplication on heterogeneous platforms. We target two different classes of heterogeneous computing resources: heterogeneous networks of workstations and collections of heterogeneous clusters. Intuitively, the problem is to load balance the work with different speed resources while minimizing the communication volume. We formally state this problem in a geometric framework and prove its NP-completeness. Next, we introduce a (polynomial) column-based heuristic, which turns out to be very satisfactory: We derive a theoretical performance guarantee for the heuristic and we assess its practical usefulness through MPI experiments},
author = {Beaumont, Olivier and Boudet, Vincent and Rastello, Fabrice and Robert, Yves},
doi = {10.1109/71.963416},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Beaumont et al. - 2001 - Matrix multiplication on heterogeneous platforms.pdf:pdf},
issn = {1045-9219},
journal = {IEEE Transactions on Parallel and Distributed Systems},
keywords = {printed},
mendeley-tags = {printed},
number = {10},
pages = {1033--1051},
title = {{Matrix multiplication on heterogeneous platforms}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=963416},
volume = {12},
year = {2001}
}
@book{Beineke2004,
abstract = {The rapidly expanding area of algebraic graph theory uses two different branches of algebra to explore various aspects of graph theory: linear algebra (for spectral theory) and group theory (for studying graph symmetry). These areas have links with other areas of mathematics, such as logic and harmonic analysis, and are increasingly being used in such areas as computer networks where symmetry is an important feature. Other books cover portions of this material, but this book is unusual in covering both of these aspects and there are no other books with such a wide scope. Peter J. Cameron, internationally recognized for his substantial contributions to the area, served as academic consultant for this volume, and the result is ten expository chapters written by acknowledged international experts in the field. Their well-written contributions have been carefully edited to enhance readability and to standardize the chapter structure, terminology and notation throughout the book. To help the reader, there is an extensive introductory chapter that covers the basic background material in graph theory, linear algebra and group theory. Each chapter concludes with an extensive list of references.},
editor = {Beineke, Lowell W. and Wilson, Robin J.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Unknown - 2004 - Topics in Algebraic Graph Theory (Encyclopedia of Mathematics and its Applications) (v. 1).pdf:pdf},
internal-note = {Cameron removed from editor list: per the abstract he was academic consultant, not an editor.},
isbn = {0521801974},
pages = {294},
publisher = {Cambridge University Press},
series = {Encyclopedia of Mathematics and its Applications},
title = {{Topics in Algebraic Graph Theory}},
url = {http://www.amazon.com/Topics-Algebraic-Encyclopedia-Mathematics-Applications/dp/0521801974},
year = {2004}
}
@incollection{Berkhin2006,
abstract = {Clustering is the division of data into groups of similar objects. In clustering, some details are disregarded in exchange for data simplification. Clustering can be viewed as a data modeling technique that provides for concise summaries of the data. Clustering is therefore related to many disciplines and plays an important role in a broad range of applications. The applications of clustering usually deal with large datasets and data with many attributes. Exploration of such data is a subject of data mining. This survey concentrates on clustering algorithms from a data mining perspective.},
author = {Berkhin, Pavel},
booktitle = {Grouping Multidimensional Data: Recent Advances in Clustering},
chapter = {2},
doi = {10.1007/3-540-28349-8_2},
editor = {Kogan, Jacob and Nicholas, Charles and Teboulle, Marc},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Berkhin - 2006 - A Survey of Clustering Data Mining Techniques.pdf:pdf},
isbn = {9783540283485},
pages = {25--71},
publisher = {Springer},
title = {{A Survey of Clustering Data Mining Techniques}},
url = {http://www.springerlink.com/index/x321256p66512121.pdf},
year = {2006}
}
@book{Berry2004,
abstract = {Extracting content from text continues to be an important research problem for information processing and management. Approaches to capture the semantics of text-based document collections may be based on Bayesian models, probability theory, vector space models, statistical models, or even graph theory. As the volume of digitized textual media continues to grow, so does the need for designing robust, scalable indexing and search strategies (software) to meet a variety of user needs. Knowledge extraction or creation from text requires systematic yet reliable processing that can be codified and adapted for changing needs and environments. This book will draw upon experts in both academia and industry to recommend practical approaches to the purification, indexing, and mining of textual information. It will address document identification, clustering and categorizing documents, cleaning text, and visualizing semantic models of text.},
address = {New York},
editor = {Berry, Michael W.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Berry - 2004 - Survey of Text Mining Clustering, Classification, and Retrieval.pdf:pdf},
isbn = {0387955631},
pages = {262},
publisher = {Springer},
title = {{Survey of Text Mining: Clustering, Classification, and Retrieval}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.159.7323\&amp;rep=rep1\&amp;type=pdf},
year = {2004}
}
@book{Berry2008,
address = {London},
doi = {10.1007/978-1-84800-046-9},
editor = {Berry, Michael W. and Castellanos, Malu},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Berry, Castellanos - 2008 - Survey of Text Mining II.pdf:pdf},
isbn = {978-1-84800-045-2},
pages = {239},
publisher = {Springer London},
title = {{Survey of Text Mining II}},
url = {http://www.springerlink.com/index/10.1007/978-1-84800-046-9},
year = {2008}
}
@book{Berry2010,
editor = {Berry, Michael W. and Kogan, Jacob},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Berry, Kogan - 2010 - Text Mining Applications and Theory.pdf:pdf},
isbn = {9780470749821},
pages = {223},
publisher = {Wiley},
title = {{Text Mining: Applications and Theory}},
url = {http://books.google.com/books?hl=en\&amp;lr=\&amp;id=u-SrKyUrafsC\&amp;oi=fnd\&amp;pg=PR11\&amp;dq=Text+Mining+Applications+and+Theory\&amp;ots=WKkLqLwhD2\&amp;sig=OYW4YSrv7Ahu\_ljPAvlMIxDwiRc},
year = {2010}
}
@inproceedings{Bilardi2007,
  author    = {Bilardi, Gianfranco and Pietracaprina, Andrea and Pucci, Geppino and Silvestri, Francesco},
  booktitle = {2007 IEEE International Parallel and Distributed Processing Symposium},
  doi       = {10.1109/IPDPS.2007.370243},
  file      = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bilardi et al. - 2007 - Network-Oblivious Algorithms.pdf:pdf},
  isbn      = {1-4244-0909-8},
  pages     = {1--10},
  publisher = {IEEE},
  title     = {{Network-Oblivious Algorithms}},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4227971},
  year      = {2007}
}
@book{Bini2007,
abstract = {Practical JRuby and Rails Web 2.0 Projects will be the first book on JRuby, an agile open source project that lets Java developers integrate and use the popular Ruby on Rails to build Web applications. The book author, JRuby project co-leader Ola Bini, is uniquely positioned to write about this, as he brings a unique blend of knowledge surrounding JRuby internals, the Java programming language and the Ruby programming language, along with solid experience building enterprise-level Rails applications.

Java has long been the enterprise-level development platform of choice. However a significant trend within the Java community is afoot: Continued interests in dynamic, agile languages such as Ruby are causing many previously staunch Java developers to reconsider their development strategies. However, the costs of doing so can be significant, as Java tools and libraries are both vast and mature.

Well, JRuby is providing a happy medium for these developers, by combining the power of Java with the flexibility of Ruby on Rails to create a completely new type of development strategy.},
author = {Bini, Ola},
edition = {1},
isbn = {9781590598818},
pages = {360},
publisher = {Apress},
title = {{Practical JRuby on Rails Web 2.0 Projects: Bringing Ruby on Rails to the Java Platform}},
year = {2007}
}
@inproceedings{Blelloch2008,
abstract = {This paper presents a multicore-cache model that reflects the reality that multicore processors have both per-processor private (L1) caches and a large shared (L2) cache on chip. We consider a broad class of parallel divide-and-conquer algorithms and present a new on-line scheduler, CONTROLLED-PDF, that is competitive with the standard sequential scheduler in the following sense. Given any dynamically unfolding computation DAG from this class of algorithms, the cache complexity on the multicore-cache model under our new scheduler is within a constant factor of the sequential cache complexity for both L1 and L2, while the time complexity is within a constant factor of the sequential time complexity divided by the number of processors p. These are the first such asymptotically-optimal results for any multicore model. Finally, we show that a separator-based algorithm for sparse-matrix-dense-vector-multiply achieves provably good cache performance in the multicore-cache model, as well as in the well-studied sequential cache-oblivious model.},
author = {Blelloch, Guy E. and Chowdhury, Rezaul Alam and Gibbons, Phillip B. and Ramachandran, Vijaya and Chen, Shimin and Kozuch, Michael},
booktitle = {Proceedings of the 19th annual ACM-SIAM symposium on Discrete algorithms},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Blelloch et al. - 2008 - Provably good multicore cache performance for divide-and-conquer algorithms.pdf:pdf},
keywords = {printed},
mendeley-tags = {printed},
number = {1},
organization = {ACM/SIAM},
pages = {501--510},
publisher = {Society for Industrial and Applied Mathematics},
title = {{Provably good multicore cache performance for divide-and-conquer algorithms}},
url = {http://portal.acm.org/citation.cfm?id=1347082.1347137},
volume = {8},
year = {2008}
}
@inproceedings{Blelloch2010,
address = {New York, New York, USA},
author = {Blelloch, Guy E. and Gibbons, Phillip B. and Simhadri, Harsha Vardhan},
booktitle = {Proceedings of the 22nd ACM symposium on Parallelism in algorithms and architectures - SPAA '10},
doi = {10.1145/1810479.1810519},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Blelloch, Gibbons, Simhadri - 2010 - Low depth cache-oblivious algorithms.pdf:pdf},
isbn = {9781450300797},
keywords = {cache-oblivious algorithms,graph algorithms,multiprocessors,parallel algorithms,schedulers,sorting,sparse-matrix vector multiply},
month = jun,
pages = {189},
publisher = {ACM Press},
title = {{Low depth cache-oblivious algorithms}},
url = {http://dl.acm.org/citation.cfm?id=1810479.1810519},
year = {2010}
}
@inproceedings{Blumofe,
abstract = {This paper studies the problem of efficiently scheduling fully strict (i.e., well-structured) multithreaded computations on parallel computers. A popular and practical method of scheduling this kind of dynamic MIMD-style computation is “work stealing,” in which processors needing work steal computational threads from other processors. In this paper, we give the first provably good work-stealing scheduler for multithreaded computations with dependencies. Specifically, our analysis shows that the expected time TP to execute a fully strict computation on P processors using our work-stealing scheduler is TP=O(T1/P+T∞ ), where T1 is the minimum serial execution time of the multithreaded computation and T∞ is the minimum execution time with an infinite number of processors. Moreover, the space SP required by the execution satisfies SP\&les;S1P. We also show that the expected total communication of the algorithm is at most O(T∞Smax P), where Smax is the size of the largest activation record of any thread, thereby justifying the folk wisdom that work-stealing schedulers are more communication efficient than their work-sharing counterparts. All three of these bounds are existentially optimal to within a constant factor},
author = {Blumofe, Robert D. and Leiserson, Charles E.},
booktitle = {Proceedings 35th Annual Symposium on Foundations of Computer Science},
doi = {10.1109/SFCS.1994.365680},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Blumofe, Leiserson - 1994 - Scheduling multithreaded computations by work stealing.pdf:pdf},
isbn = {0-8186-6580-7},
pages = {356--368},
publisher = {IEEE Comput. Soc. Press},
title = {{Scheduling multithreaded computations by work stealing}},
url = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=365680 http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=365680},
year = {1994}
}
@article{Bock2012,
abstract = {We present an optimized single-precision implementation of the Sparse Approximate Matrix Multiply ($\backslash$SpAMM\{\}) [M. Challacombe and N. Bock, arXiv \{$\backslash$bf 1011.3534\} (2010)], a fast algorithm for matrix-matrix multiplication for matrices with decay that achieves an \$\backslash mathcal\{O\} (n \backslash ln n)\$ computational complexity with respect to matrix dimension \$n\$. We find that the max norm of the error matrix achieved with a $\backslash$SpAMM\{\} tolerance of below \$2 \backslash times 10\^{}\{-8\}\$ is lower than that of the single-precision \{$\backslash$tt SGEMM\} for quantum chemical test matrices, while outperforming \{$\backslash$tt SGEMM\} with a cross-over already for small matrices (\$n \backslash sim 1000\$). Relative to naive implementations of $\backslash$SpAMM\{\} using optimized versions of \{$\backslash$tt SGEMM\}, such as those found in Intel's Math Kernel Library (\{$\backslash$tt MKL\}) or AMD's Core Math Library (\{$\backslash$tt ACML\}), our optimized version is found to be significantly faster. Detailed performance comparisons are made with for quantum chemical matrices of RHF/STO-2G and RHF/6-31G\$\{\}\^{}\{**\}\$ water clusters.},
archivePrefix = {arXiv},
arxivId = {1203.1692},
author = {Bock, Nicolas and Challacombe, Matt},
eprint = {1203.1692},
keywords = {printed},
mendeley-tags = {printed},
month = mar,
title = {{An Optimized Sparse Approximate Matrix Multiply}},
url = {http://arxiv.org/abs/1203.1692},
year = {2012}
}
@article{Bock2008,
abstract = {We investigate effects of ordering in blocked matrix--matrix multiplication. We find that submatrices do not have to be stored contiguously in memory to achieve near optimal performance. Instead it is the choice of execution order of the submatrix multiplications that leads to a speedup of up to four times for small block sizes. This is in contrast to results for single matrix elements showing that contiguous memory allocation quickly becomes irrelevant as the blocksize increases.},
archivePrefix = {arXiv},
arxivId = {0808.1108},
author = {Bock, Nicolas and Rubensson, Emanuel H. and Sałek, Paweł and Niklasson, Anders M. N. and Challacombe, Matt},
eprint = {0808.1108},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bock et al. - 2008 - Cache oblivious storage and access heuristics for blocked matrix-matrix multiplication.pdf:pdf},
journal = {Linear Algebra and its Applications},
keywords = {computer calculations,mathematics,matrix elements,memory devices,performance},
month = aug,
title = {{Cache oblivious storage and access heuristics for blocked matrix-matrix multiplication}},
url = {http://arxiv.org/abs/0808.1108},
year = {2008}
}
@book{Boguski1993,
abstract = {Computerized sequence analysis is an integral part of biotechnological research, yet many biologists have received no formal training in this important technology. Sequence Analysis Primer offers the beginner the necessary background to enter this vital field and helps more seasoned researchers to fine-tune their approach. It covers basic data manipulation such as homology searches, stem-loop identification, and protein secondary structure prediction, and is compatible with most sequence analysis programs. A detailed example giving steps for characterizing a new gene sequence provides users with hands-on experience when combined with their current software. The book will be invaluable to researchers and students in molecular biology, genetics, biochemistry, microbiology, and biotechnology},
author = {Boguski, Mark S. and Caballero, Lisa and Eisenberg, David and Elliston, Keith and L{\"u}thy, Roland and Rice, Roland M. and States, David J.},
editor = {Gribskov, Michael and Devereux, John},
isbn = {978-0195098747},
keywords = {base composition bias,base frequency,codon preference,conserved base,dot matrix analysis,equivalent alignments,first immunoglobulin domain,gap creation penalty,gap extension penalty,human beta adrenergic receptor,hydrophobic moment,incorrect overlap,internal repeats,matrix corrected,mean hydrophobicity,overlap detection,plotting threshold,previous alignment,score matrix,sequence analysis packages,sequence assembly programs,similarity and homology,synonymous family,unsequenced regions,using dot matrix methods},
language = {English},
pages = {296},
publisher = {Oxford University Press},
title = {{Sequence Analysis Primer}},
year = {1993}
}
@book{Bosanac2007,
abstract = {Using the JavaTM platform’s new scripting support, you can improve efficiency, streamline your development processes, and solve problems ranging from prototyping to Web application programming. In Scripting in Java, Dejan Bosanac covers key aspects of scripting with Java, from the exciting new Groovy scripting language to Java’s new Scripting and Web Scripting APIs.
 
Bosanac begins by reviewing the role and value of scripting languages, and then systematically introduces today’s best scripting solutions for the Java platform. He introduces Java scripting frameworks, identifies proven patterns for integrating scripting into Java applications, and presents practical techniques for everything from unit testing to project builds. He supports key concepts with extensive code examples that demonstrate scripting at work in real-world Java projects. Coverage includes
 
·        Why scripting languages offer surprising value to Java programmers
·        Scripting languages that run inside the JVM: BeanShell, JavaScript, and Python
·        Groovy in depth: installation, configuration, Java-like syntax, Java integration, security, and more
·        Groovy extensions: accessing databases, working with XML, and building simple Web applications and Swing-based UIs
·        Bean Scripting Framework: implementation, basic abstractions, and usage examples
·        Traditional and new patterns for Java-based scripting
·        JSR 223 Scripting API: language bindings, discovery mechanisms, threading, pluggable namespaces, and more
·        JSR 223 Web Scripting Framework: scripting the generation of Web content within servlet containers},
author = {Bosanac, Dejan},
isbn = {978-0321321930},
pages = {554},
publisher = {Addison-Wesley Professional},
title = {{Scripting in Java: Languages, Frameworks, and Patterns}},
year = {2007}
}
@book{Bourne2003,
edition = {1},
editor = {Bourne, Philip E. and Weissig, Helge},
isbn = {9780471201991},
pages = {704},
publisher = {John Wiley \& Sons, Inc.},
title = {{Structural Bioinformatics (Methods of Biochemical Analysis)}},
year = {2003}
}
@article{Bouveyron2006,
abstract = {Clustering in high-dimensional spaces is a difficult problem which is recurrent in many domains, for example in image analysis. The difficulty is due to the fact that high-dimensional data usually live in different low-dimensional subspaces hidden in the original space. This paper presents a family of Gaussian mixture models designed for high-dimensional data which combine the ideas of dimension reduction and parsimonious modeling. These models give rise to a clustering method based on the Expectation-Maximization algorithm which is called High-Dimensional Data Clustering (HDDC). In order to correctly fit the data, HDDC estimates the specific subspace and the intrinsic dimension of each group. Our experiments on artificial and real datasets show that HDDC outperforms existing methods for clustering high-dimensional data},
author = {Bouveyron, Charles and Girard, Stephane and Schmid, Cordelia},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bouveyron, Girard, Schmid - 2006 - High-Dimensional Data Clustering.pdf:pdf},
journal = {Computational Statistics \& Data Analysis},
number = {1},
pages = {502--519},
publisher = {CNRS},
title = {{High-Dimensional Data Clustering}},
url = {http://eprints.pascal-network.org/archive/00002737/},
volume = {52},
year = {2006}
}
@inproceedings{Boyd1993,
abstract = {Communication has a dominant impact on the performance of massively parallel processors (MPPs). We propose a methodology to evaluate the internode communication performance of MPPs using a controlled set of synthetic workloads. By generating a range of sparse matrices and measuring the performance of a simple parallel algorithm that repeatedly multiplies a sparse matrix by a dense vector, we can determine the relative performance of different communication workloads. Specifiable communication parameters include the number of nodes, the average amount of communication per node, the degree of sharing among the nodes, and the computation-communication ratio. We describe a general procedure for constructing sparse matrices that have these desired communication and computation parameters, and apply a range of these synthetic workloads to evaluate the hierarchical ring interconnection and cache-only memory architecture (COMA) of the Kendall Square Research KSRI MPP. This analysis discusses the impact of the KSRI architecture on communication performance, highlighting the utility and impact of the automatic update feature. It also investigates the impact of system contention on the performance, particularly how it causes potential updates to be ignored.},
author = {Boyd, Eric L and Wellman, John-David and Abraham, Santosh G and Davidson, Edward S},
booktitle = {Proceedings of the 7th international conference on Supercomputing - ICS '93},
doi = {10.1145/165939.165974},
isbn = {089791600X},
pages = {240--250},
publisher = {ACM},
series = {ICS '93},
title = {{Evaluating the communication performance of MPPs using synthetic sparse matrix multiplication workloads}},
url = {http://portal.acm.org/citation.cfm?id=165939.165974\&coll=GUIDE\&dl=GUIDE\&CFID=94888687\&CFTOKEN=22273710},
year = {1993}
}
@inproceedings{Boyer2009,
address = {New York, New York, USA},
author = {Boyer, Brice and Dumas, Jean-Guillaume and Pernet, Cl{\'e}ment and Zhou, Wei},
booktitle = {Proceedings of the 2009 international symposium on Symbolic and algebraic computation - ISSAC '09},
doi = {10.1145/1576702.1576713},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Boyer et al. - 2009 - Memory efficient scheduling of Strassen-Winograd's matrix multiplication algorithm.pdf:pdf},
isbn = {9781605586090},
keywords = {matrix multiplication,memory placement,printed,strassen-winograd's algorithm},
mendeley-tags = {printed},
month = jul,
pages = {55},
publisher = {ACM Press},
title = {{Memory efficient scheduling of Strassen-Winograd's matrix multiplication algorithm}},
url = {http://dl.acm.org/citation.cfm?id=1576702.1576713},
year = {2009}
}
@article{Brain1990,
abstract = {This article presents a simple algorithm for packing sparse 2-D arrays into minimal 1-D arrays in O(r2) time. Retrieving an element from the packed 1-D array is O(1). This packing algorithm is then applied to create minimal perfect hashing functions for large word lists. Many existing perfect hashing algorithms process large word lists by segmenting them into several smaller lists. The perfect hashing function described in this article has been used to create minimal perfect hashing functions for unsegmented word sets of up to 5000 words. Compared with other current algorithms for perfect hashing, this algorithm is a significant improvement in terms of both time and space efficiency.},
author = {Brain, Marshall D. and Tharp, Alan L},
doi = {10.1016/0306-4379(90)90001-6},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Brain, Tharp - 1990 - Perfect hashing using sparse matrix packing.pdf:pdf},
issn = {03064379},
journal = {Information Systems},
keywords = {hashing,minimal perfect hashing,perfect hashing,printed,sparse matrix packing},
mendeley-tags = {printed},
month = jan,
number = {3},
pages = {281--290},
title = {{Perfect hashing using sparse matrix packing}},
url = {http://dx.doi.org/10.1016/0306-4379(90)90001-6},
volume = {15},
year = {1990}
}
@incollection{Brandes2003,
abstract = {A promising approach to graph clustering is based on the intuitive notion of intra-cluster density vs. inter-cluster sparsity. While both formalizations and algorithms focusing on particular aspects of this rather vague concept have been proposed no conclusive argument on their appropriateness has been given.},
address = {Berlin, Heidelberg},
author = {Brandes, Ulrik and Gaertler, Marco and Wagner, Dorothea},
booktitle = {Algorithms - ESA 2003},
doi = {10.1007/b13632},
editor = {Di Battista, Giuseppe and Zwick, Uri},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Brandes, Gaertler, Wagner - 2003 - Experiments on graph clustering algorithms.pdf:pdf},
isbn = {978-3-540-20064-2},
keywords = {printed},
mendeley-tags = {printed},
number = {Lncs 2832},
pages = {568--579},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Experiments on graph clustering algorithms}},
url = {http://www.springerlink.com/index/10.1007/b13632},
volume = {2832},
year = {2003}
}
@book{Breshears2009,
abstract = {If you're looking to take full advantage of multi-core processors with concurrent programming, this practical book provides the knowledge and hands-on experience you need. The Art of Concurrency is one of the few resources to focus on implementing algorithms in the shared-memory model of multi-core processors, rather than just theoretical models or distributed-memory architectures. The book provides detailed explanations and usable samples to help you transform algorithms from serial to parallel code, along with advice and analysis for avoiding mistakes that programmers typically make when first attempting these computations. Written by an Intel engineer with over two decades of parallel and concurrent programming experience, this book will help you: Understand parallelism and concurrency Explore differences between programming for shared-memory and distributed-memory Learn guidelines for designing multithreaded applications, including testing and tuning Discover how to make best use of different threading libraries, including Windows threads, POSIX threads, OpenMP, and Intel Threading Building Blocks Explore how to implement concurrent algorithms that involve sorting, searching, graphs, and other practical computations The Art of Concurrency shows you how to keep algorithms scalable to take advantage of new processors with even more cores. For developing parallel code algorithms for concurrent programming, this book is a must.},
author = {Breshears, Clay},
edition = {1},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Breshears - 2009 - The Art of Concurrency.pdf:pdf},
isbn = {978-0596521530},
pages = {304},
publisher = {O'Reilly Media},
title = {{The Art of Concurrency}},
year = {2009}
}
@article{Brohee2008,
abstract = {The network analysis tools (NeAT) provide a user-friendly web access to a collection of modular tools for the analysis of networks (graphs) and clusters (e.g. microarray clusters, functional classes, etc.). A first set of tools supports basic operations on graphs (comparison between two graphs, neighborhood of a set of input nodes, path finding and graph randomization). Another set of programs makes the connection between networks and clusters (graph-based clustering, cliques discovery and mapping of clusters onto a network). The toolbox also includes programs for detecting significant intersections between clusters/classes (e.g. clusters of co-expression versus functional classes of genes). NeAT are designed to cope with large datasets and provide a flexible toolbox for analyzing biological networks stored in various databases (protein interactions, regulation and metabolism) or obtained from high-throughput experiments (two-hybrid, mass-spectrometry and microarrays). The web interface interconnects the programs in predefined analysis flows, enabling to address a series of questions about networks of interest. Each tool can also be used separately by entering custom data for a specific analysis. NeAT can also be used as web services (SOAP/WSDL interface), in order to design programmatic workflows and integrate them with other available resources.},
author = {Broh{\'e}e, Sylvain and Faust, Karoline and Lima-Mendez, Gipsi and Sand, Olivier and Janky, Rekin's and Vanderstocken, Gilles and Deville, Yves and {Van Helden}, Jacques},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Broh\'{e}e et al. - 2008 - NeAT a toolbox for the analysis of biological networks, clusters, classes and pathways.pdf:pdf},
institution = {Laboratoire de Bioinformatique des G\'{e}nomes et R\'{e}seaux (BiGRE), Universit\'{e} Libre de Bruxelles (ULB), Boulevard du Triomphe, CP263, B-1050 Bruxelles, Belgium. sylvain@scmbb.ulb.ac.be},
journal = {Nucleic Acids Research},
keywords = {cluster analysis,computer graphics,gene expression regulation,internet,metabolic networks pathways,oligonucleotide array sequence analysis,protein interaction mapping,signal transduction,software},
number = {Web Server issue},
pages = {W444--W451},
publisher = {Oxford University Press},
title = {{NeAT: a toolbox for the analysis of biological networks, clusters, classes and pathways}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/18524799},
volume = {36},
year = {2008}
}
@article{Brohee2006a,
abstract = {Protein interactions are crucial components of all cellular processes. Recently, high-throughput methods have been developed to obtain a global description of the interactome (the whole network of protein interactions for a given organism). In 2002, the yeast interactome was estimated to contain up to 80,000 potential interactions. This estimate is based on the integration of data sets obtained by various methods (mass spectrometry, two-hybrid methods, genetic studies). High-throughput methods are known, however, to yield a non-negligible rate of false positives, and to miss a fraction of existing interactions. The interactome can be represented as a graph where nodes correspond with proteins and edges with pairwise interactions. In recent years clustering methods have been developed and applied in order to extract relevant modules from such graphs. These algorithms require the specification of parameters that may drastically affect the results. In this paper we present a comparative assessment of four algorithms: Markov Clustering (MCL), Restricted Neighborhood Search Clustering (RNSC), Super Paramagnetic Clustering (SPC), and Molecular Complex Detection (MCODE).},
author = {Broh{\'e}e, Sylvain and van Helden, Jacques},
doi = {10.1186/1471-2105-7-488},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Broh\'{e}e, van Helden - 2006 - Evaluation of clustering algorithms for protein-protein interaction networks.pdf:pdf},
issn = {1471-2105},
journal = {BMC Bioinformatics},
keywords = {Algorithms,Cluster Analysis,Computational Biology,Computer Graphics,Databases,Markov Chains,Mass Spectrometry,Protein,Protein Binding,Protein Interaction Mapping,Saccharomyces cerevisiae Proteins,Saccharomyces cerevisiae Proteins: chemistry,Saccharomyces cerevisiae Proteins: metabolism,Software Validation,Two-Hybrid System Techniques,printed},
mendeley-tags = {printed},
month = jan,
pages = {488},
pmid = {17087821},
title = {{Evaluation of clustering algorithms for protein-protein interaction networks.}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=1637120\&tool=pmcentrez\&rendertype=abstract},
volume = {7},
year = {2006}
}
@phdthesis{Buluc2010,
author = {Bulu{\c{c}}, Ayd{\i}n},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bulu\c{c} - 2010 - Linear algebraic primitives for parallel computing on large graphs.pdf:pdf},
pages = {280},
school = {University of California Santa Barbara},
title = {{Linear algebraic primitives for parallel computing on large graphs}},
type = {Ph.D. Thesis},
url = {http://gauss.cs.ucsb.edu/~aydin/publications.html},
year = {2010}
}
@article{BuluCc2010,
abstract = {Generalized sparse matrix-matrix multiplication is a key primitive for many high performance graph algorithms as well as some linear solvers such as multigrid. We present the first parallel algorithms that achieve increasing speedups for an unbounded number of processors. Our algorithms are based on two-dimensional block distribution of sparse matrices where serial sections use a novel hypersparse kernel for scalability. We give a state-of-the-art MPI implementation of one of our algorithms. Our experiments show scaling up to thousands of processors on a variety of test scenarios.},
author = {Bulu{\c{c}}, Ayd{\i}n and Gilbert, John R.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bulu\c{c}, Gilbert - 2010 - Highly Parallel Sparse Matrix-Matrix Multiplication.pdf:pdf},
institution = {UCSB},
journal = {arXiv preprint arXiv:1006.2183},
keywords = {graph algorithms,hypersparsity,ipdps,material paper,parallel linear algebra,printed,sparse,sparse matrix matrix multiplication,summa},
mendeley-tags = {printed},
pages = {13--15},
publisher = {ACM},
title = {{Highly Parallel Sparse Matrix-Matrix Multiplication}},
url = {http://arxiv.org/abs/1006.2183},
volume = {cs.DC},
year = {2010}
}
@article{Buluc2011,
abstract = {Generalized sparse matrix-matrix multiplication (or SpGEMM) is a key primitive for many high performance graph algorithms as well as for some linear solvers, such as algebraic multigrid. Here we show that SpGEMM also yields efficient algorithms for general sparse-matrix indexing in distributed memory, provided that the underlying SpGEMM implementation is sufficiently flexible and scalable. We demonstrate that our parallel SpGEMM methods, which use two-dimensional block data distributions with serial hypersparse kernels, are indeed highly flexible, scalable, and memory-efficient in the general case. This algorithm is the first to yield increasing speedup to an unbounded number of processors; our experiments show scaling up to thousands of processors in a variety of test scenarios.},
archivePrefix = {arXiv},
arxivId = {1109.3739},
author = {Bulu{\c{c}}, Ayd{\i}n and Gilbert, John R.},
eprint = {1109.3739},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bulu\c{c}, Gilbert - 2011 - Parallel Sparse Matrix-Matrix Multiplication and Indexing Implementation and Experiments.pdf:pdf},
journal = {SIAM Journal on Scientific Computing},
keywords = {printed},
mendeley-tags = {printed},
month = sep,
title = {{Parallel Sparse Matrix-Matrix Multiplication and Indexing: Implementation and Experiments}},
url = {http://arxiv.org/abs/1109.3739},
year = {2011}
}
@inproceedings{Buluc2008a,
abstract = {Multicore processors are marking the beginning of a new era of computing where massive parallelism is available and necessary. Slightly slower but easy to parallelize kernels are becoming more valuable than sequentially faster kernels that are unscalable when parallelized. In this paper, we focus on the multiplication of sparse matrices (SpGEMM). We first present the issues with existing sparse matrix representations and multiplication algorithms that make them unscalable to thousands of processors. Then, we develop and analyze two new algorithms that overcome these limitations. We consider our algorithms first as the sequential kernel of a scalable parallel sparse matrix multiplication algorithm and second as part of a polyalgorithm for SpGEMM that would execute different kernels depending on the sparsity of the input matrices. Such a sequential kernel requires a new data structure that exploits the hypersparsity of the individual submatrices owned by a single processor after the 2D partitioning. We experimentally evaluate the performance and characteristics of our algorithms and show that they scale significantly better than existing kernels.},
author = {Bulu{\c{c}}, Ayd{\i}n and Gilbert, John R.},
booktitle = {2008 IEEE International Symposium on Parallel and Distributed Processing},
doi = {10.1109/IPDPS.2008.4536313},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bulu\c{c}, Gilbert - 2008 - On the representation and multiplication of hypersparse matrices.pdf:pdf},
isbn = {9781424416936},
issn = {15302075},
keywords = {printed},
mendeley-tags = {printed},
pages = {1--11},
publisher = {IEEE},
title = {{On the representation and multiplication of hypersparse matrices}},
url = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=4536313},
year = {2008}
}
@inproceedings{Buluc2008,
abstract = {We identify the challenges that are special to parallel sparse matrix-matrix multiplication (PSpGEMM). We show that sparse algorithms are not as scalable as their dense counterparts, because in general, there are not enough non-trivial arithmetic operations to hide the communication costs as well as the sparsity overheads. We analyze the scalability of 1D and 2D algorithms for PSpGEMM. While the 1D algorithm is a variant of existing implementations, 2D algorithms presented are completely novel. Most of these algorithms are based on the previous research on parallel dense matrix multiplication. We also provide results from preliminary experiments with 2D algorithms.},
author = {Bulu{\c{c}}, Ayd{\i}n and Gilbert, John R.},
doi = {10.1109/ICPP.2008.45},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bulu\c{c}, Gilbert - 2008 - Challenges and Advances in Parallel Sparse Matrix-Matrix Multiplication.pdf:pdf},
issn = {01903918},
booktitle = {2008 37th International Conference on Parallel Processing},
keywords = {printed},
mendeley-tags = {printed},
pages = {503--510},
publisher = {IEEE},
title = {{Challenges and Advances in Parallel Sparse Matrix-Matrix Multiplication}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4625887},
volume = {252},
year = {2008}
}
@article{Burns2000,
abstract = {The performance characteristics of several classes of parallel computing systems are analyzed and compared using high-fidelity modeling and execution-driven simulation. Processor, bus and network models are used to construct and simulate the architectures of symmetric multiprocessors (SMPs), clusters of uniprocessors, and clusters of SMPs. To demonstrate a typical use, the performance of ten systems is evaluated using a parallel matrix-multiplication algorithm. Because the performance of a parallel algorithm on an architecture depends on its communication-to-computation ratio, an analysis of communication latencies for bus transactions, cache coherence, and network transactions is used to quantify each system's communication overhead. While low-level performance attributes are difficult to measure on experimental testbed systems, and are difficult to accurately represent in purely analytical models, with high fidelity simulative models they can be readily and accurately obtained. This level of detail allows the designer to rapidly prototype and evaluate the performance of parallel and distributed systems.},
author = {Burns, Mark W. and George, Alan D. and Wallace, Bradley A.},
doi = {10.1177/003754970007400203},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Burns, George, Wallace - 2000 - Modeling and Simulative Performance Analysis of SMP and Clustered Computer Architectures.pdf:pdf},
issn = {00375497},
journal = {Simulation},
number = {2},
pages = {84--96},
title = {{Modeling and Simulative Performance Analysis of SMP and Clustered Computer Architectures}},
url = {http://sim.sagepub.com/cgi/content/abstract/74/2/84},
volume = {74},
year = {2000}
}
@inproceedings{Bustamam2010a,
abstract = {Markov clustering is becoming a key algorithm with in bioinformatics for determining clusters in networks. For instance, clustering protein interaction networks is helping find genes implicated in diseases such as cancer. However, with fast sequencing and other technologies generating vast amounts of data on biological networks, performance and scalability issues are becoming a critical limiting factorin applications. Meanwhile, Graphics Processing (GPU)computing, which uses a massively parallel computing environment in the GPU card, is becoming a very powerful, efficient and low cost option to achieve substantial performance gains over CPU approaches. This paper introduces a very fast Markov clustering algorithm (MCL) based on massive parallel computing in GPU. We use the Compute Unified Device Architecture (CUDA) to allow the GPU to perform parallel sparse matrix-matrix computations and parallel sparse Markov matrix normalizations, which are at the heart of the clustering algorithm. The key to optimizing our CUDA Markov Clustering (CUDAMCL) was utilizing ELLACK-R sparse data format to allow the effective and fine-grain massively parallel processing to cope with the sparse nature of interaction networks datasets in bioinformatics applications. CUDA also allows us to use on-chip memory on the GPU efficiently, to lower the latency time thus circumventing a major issue in other parallel computing environments, such as Message Passing Interface (MPI). Here we describe the GPU algorithm and its application to several real world problems as well as to artificial datasets. We find that the principle factor causing variation in performance of the GPU approach is the relative sparseness of networks. Comparing GPU computation times against a modern quad-core CPU on the published(relatively sparse) standard BIOGRID protein interaction networks with 5156 and 23175 nodes, speed factors of 4 times and 9 were obtained, respectively. 
On the Human Protein Reference Database, the speed of clustering of 19599 proteins was improved by a factor of 7 by the GPU algorithm. However, on artificially generated densely connected networks with 1600 to 4800 nodes, speedups by a factor in the range 40 to 120 times were readily obtained. As the results show, in all cases the GPU implementation is significantly faster than the original MCL running on CPU. Such approaches are allowing large-scale parallel computation on off-the-shelf desktop machines that were previously only possible on super-computing architectures, and have the potential to significantly change the way bioinformaticians and biologists compute and interact with their data.},
author = {Bustamam, Alhadi and Burrage, Kevin and Hamilton, Nicholas A.},
booktitle = {2010 Ninth International Workshop on Parallel and Distributed Methods in Verification, and Second International Workshop on High Performance Computational Systems Biology},
doi = {10.1109/PDMC-HiBi.2010.23},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bustamam, Burrage, Hamilton - 2010 - Fast Parallel Markov Clustering in Bioinformatics Using Massively Parallel Graphics Processing Unit Computing.pdf:pdf},
isbn = {978-1-4244-8753-0},
keywords = {printed},
mendeley-tags = {printed},
month = sep,
pages = {116--125},
publisher = {IEEE},
title = {{Fast Parallel Markov Clustering in Bioinformatics Using Massively Parallel Graphics Processing Unit Computing}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5698477},
year = {2010}
}
@article{Bustamam2010,
abstract = {The massively parallel computing using graphical processing unit (GPU), which is based on tens of thousands of parallel threads within hundreds of GPU's streaming processors, has gained broad popularity and attracted researchers in a wide range of application areas from finance, computer aided engineering, computational fluid dynamics, game physics, numerics, science, medical imaging, life science, and so on, including molecular biology and bioinformatics. Meanwhile, Markov clustering algorithm (MCL) has become one of the most effective and highly cited methods to detect and analyze the communities/clusters within an interaction network dataset on many real world problems such as social, technological, or biological networks including protein-protein interaction networks. However, as the dataset become bigger and bigger, the computation time of MCL algorithm become slower and slower. Hence, GPU computing is an interesting and challenging alternative to attempt to improve the MCL performance. In this poster paper we introduce our improvement of MCL performance based on ELLPACK-R sparse dataset format using GPU computing with the Compute Unified Device Architecture tool (CUDA) from NVIDIA (called CUDA-MCL). As the results show the significant improvement in CUDA-MCL performance and with the low-cost and widely available GPU devices in the market today, this CUDA-MCL implementation is allowing large-scale parallel computation on off-the-shelf desktop machines. Moreover the GPU computing approaches potentially may contribute to significantly change the way bioinformaticians and biologists compute and interact with their data.},
author = {Bustamam, Alhadi and Burrage, Kevin and Hamilton, Nicholas A.},
doi = {10.1109/ACT.2010.10},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Bustamam, Burrage, Hamilton - 2010 - A GPU Implementation of Fast Parallel Markov Clustering in Bioinformatics Using EllPACK-R Sparse Data Format.pdf:pdf},
isbn = {9781424487462},
journal = {2010 International Conference on Advances in Computing, Control and Telecommunication Technologies},
keywords = {printed},
mendeley-tags = {printed},
pages = {173--175},
publisher = {IEEE},
title = {{A GPU Implementation of Fast Parallel Markov Clustering in Bioinformatics Using EllPACK-R Sparse Data Format}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5675816},
volume = {0},
year = {2010}
}
@incollection{Caballero1992,
author = {Caballero, Lisa},
booktitle = {Sequence Analysis Primer},
chapter = {4},
editor = {Gribskov, Michael and Devereux, John},
isbn = {9780195098747},
pages = {159--221},
publisher = {Oxford University Press},
title = {{Practical Aspects: Analysis of Notch}},
year = {1992}
}
@inproceedings{Cai2010,
author = {Cai, Bing-Jing and Wang, Hai-Ying and Zheng, Hui-Ru and Wang, Hui},
booktitle = {2010 International Conference on Machine Learning and Cybernetics},
doi = {10.1109/ICMLC.2010.5580953},
isbn = {978-1-4244-6526-2},
month = jul,
pages = {1849--1854},
publisher = {IEEE},
title = {{Evaluation repeated random walks in community detection of social networks}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5580953},
year = {2010}
}
@article{Cai2005,
abstract = {We propose a novel document clustering method which aims to cluster the documents into different semantic classes. The document space is generally of high dimensionality and clustering in such a high dimensional space is often infeasible due to the curse of dimensionality. By using Locality Preserving Indexing (LPI), the documents can be projected into a lower-dimensional semantic space in which the documents related to the same semantics are close to each other. Different from previous document clustering methods based on Latent Semantic Indexing (LSI) or Nonnegative Matrix Factorization (NMF), our method tries to discover both the geometric and discriminating structures of the document space. Theoretical analysis of our method shows that LPI is an unsupervised approximation of the supervised Linear Discriminant Analysis (LDA) method, which gives the intuitive motivation of our method. Extensive experimental evaluations are performed on the Reuters-21578 and TDT2 data sets.},
author = {Cai, Deng and He, Xiaofeng and Han, Jiawei},
doi = {10.1109/TKDE.2005.198},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Cai, He, Han - 2005 - Document clustering using locality preserving indexing.pdf:pdf},
issn = {10414347},
journal = {IEEE Transactions on Knowledge and Data Engineering},
number = {12},
pages = {1624--1637},
pmid = {1524963},
title = {{Document clustering using locality preserving indexing}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1524963},
volume = {17},
year = {2005}
}
@unpublished{Campagna2012,
note = {Unpublished manuscript},
author = {Campagna, Andrea and Kutzkov, Konstantin and Pagh, Rasmus},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Campagna, Kutzkov, Pagh - 2012 - On Parallelizing the Column-Row Method.pdf:pdf},
institution = {IT University of Copenhagen},
keywords = {printed},
mendeley-tags = {printed},
title = {{On Parallelizing the Column-Row Method}},
year = {2012}
}
@unpublished{Campagna2012a,
note = {Unpublished manuscript},
abstract = {We present a simple method for “consistent” parallel processing of sparse outer products (columnrow vector products) over several processors, in a communication-avoiding setting where each processor has a copy of the input. The method is consistent in the sense that a given output entry is always assigned to the same processor independently of the specific structure of the outer product. We show guarantees on the work done by each processor, and achieve linear speedup down to the point where the cost is dominated by reading the input. Our method applies to the streaming model of computation where the vectors for each product successively arrive at high speed and a sequential processing would be the main computational bottleneck. Important algorithmic problems that be approached using our method are sparse matrix multiplication and frequent pair mining in a data stream. Motivated by the observation that pair frequencies adhere to a power law we theoretically analyze the precision of our method under this assumption. Experiments on real life data sets confirm the theoretical findings.},
address = {Copenhagen},
annote = {p. 4 section 3. Space-Saving only restricted to positive updates. Contradicts Cormode 2009
      },
author = {Campagna, Andrea and Kutzkov, Konstantin and Pagh, Rasmus},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Campagna, Kutzkov, Pagh - 2012 - A New Approach to Parallelizing the Column-Row Method.pdf:pdf},
institution = {IT University Copenhagen},
keywords = {frequent pattern mining,parallel algorithms,printed,sparse matrix multiplication},
mendeley-tags = {printed},
title = {{A New Approach to Parallelizing the Column-Row Method}},
year = {2012}
}
@inproceedings{Campagna2009,
abstract = {Sampling-based methods have previously been proposed for the problem of finding interesting associations in data, even for low-support items. While these methods do not guarantee precise results, they can be vastly more efficient than approaches that rely on exact counting. However, for many similarity measures no such methods have been known. In this paper we show how a wide variety of measures can be supported by a simple biased sampling method. The method also extends to find high-confidence association rules. We demonstrate theoretically that our method is superior to exact methods when the threshold for "interesting similarity/confidence" is above the average pairwise similarity/confidence, and the average support is not too low. Our method is particularly good when transactions contain many items. We confirm in experiments on standard association mining benchmarks that this gives a significant speedup on real data sets (sometimes much larger than the theoretical guarantees). Reductions in computation time of over an order of magnitude, and significant savings in space, are observed.},
author = {Campagna, Andrea and Pagh, Rasmus},
booktitle = {2009 Ninth IEEE International Conference on Data Mining},
month = dec,
pages = {61--70},
publisher = {IEEE},
title = {{Finding Associations and Computing Similarity via Biased Pair Sampling}},
url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5360231},
year = {2009}
}
@article{Cannataro2008,
author = {Cannataro, Mario and Guzzi, Pietro Hiram and Veltri, Pierangelo},
doi = {10.1109/CBMS.2008.113},
isbn = {9780769531656},
journal = {2008 21st IEEE International Symposium on Computer-Based Medical Systems},
keywords = {clustering,complexes,edges,graph,graph structure,mcl,protein,protein protein interactions,proteins,structural properties},
pages = {179--184},
publisher = {IEEE},
title = {{myMCL: A Web Portal for Protein Complexes Prediction}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4561983},
year = {2008}
}
@techreport{Chatterjee2009,
abstract = {Let P be a set of points in Rd. We propose GeoFilterKruskal, an algorithm that com- putes the minimum spanning tree of P using well separated pair decomposition in combination with a simple modication of Kruskal's algorithm. When P is sampled from uniform random dis- tribution, we show that our algorithm runs in O(n log2 n) time with probability at least 1},
author = {Chatterjee, Siddhartha and Connor, Michael and Kumar, Piyush},
number = {TR-090731},
citeseerx = {10.1.1.150.9357},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Chatterjee, Connor, Kumar - 2009 - Computing Geometric Minimum Spanning Trees Using the Filter-Kruskal Method.pdf:pdf},
institution = {Florida State University},
keywords = {computational geometry,experimental algorithmics,minimum spanning tree,morton ordering,multicore,separated pair decomposition},
pages = {1--12},
title = {{Computing Geometric Minimum Spanning Trees Using the Filter-Kruskal Method}},
url = {http://websrv.cs.fsu.edu/research/reports/TR-090731.pdf},
year = {2009}
}
@article{Chatterjee2002,
address = {New York, New York, USA},
author = {Chatterjee, Siddhartha and Lebeck, Alvin R. and Patnala, P.K. and Thottethodi, Mithuna},
doi = {10.1109/TPDS.2002.1058095},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Chatterjee et al. - 2002 - Recursive array layouts and fast matrix multiplication.pdf:pdf},
isbn = {1581131240},
issn = {1045-9219},
journal = {IEEE Transactions on Parallel and Distributed Systems},
month = nov,
number = {11},
pages = {1105--1123},
publisher = {ACM Press},
title = {{Recursive array layouts and fast matrix multiplication}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1058095},
volume = {13},
year = {2002}
}
@article{Chebrolu2008,
author = {Chebrolu, Uday and Mitchell, Joseph S. B. and Kumar, Piyush},
doi = {10.1109/ICCSA.2008.25},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Chebrolu, Mitchell, Kumar - 2008 - On finding large empty convex bodies in 3D scenes of polygonal models .pdf:pdf},
journal = {2008 International Conference on Computational Sciences and Its Applications},
pages = {382--393},
title = {{On finding large empty convex bodies in 3D scenes of polygonal models}},
year = {2008}
}
@inproceedings{Chen2010,
abstract = {Parallel computing is an important method used in high performance computing. A new SIMD architecture named ESCA (Engineering and Science Computing Accelerator) is introduced briefly in this paper. It aims to accelerate the computation for most critical scientific workload as a coprocessor by virtue of outstanding architecture and flexible parallel algorithm. As dense matrix multiplication is a widely used operation that can be accelerated by parallel computing, we maps its algorithm onto ESCA and estimates the performance, and the results imply that ESCA has some advantage and potentiality.},
author = {Chen, Pan and Dai, Kui and Wu, Dan and Rao, Jinli and Zou, Xuecheng},
booktitle = {2010 IEEE Asia Pacific Conference on Circuits and Systems},
doi = {10.1109/APCCAS.2010.5774970},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Chen et al. - 2010 - The parallel algorithm implementation of matrix multiplication based on ESCA.pdf:pdf},
isbn = {978-1-4244-7454-7},
keywords = {printed},
mendeley-tags = {printed},
month = dec,
pages = {1091--1094},
publisher = {IEEE},
title = {{The parallel algorithm implementation of matrix multiplication based on ESCA}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5774970},
year = {2010}
}
@article{Chen2010a,
abstract = {Spectral clustering algorithm has been shown to be more effective in finding clusters than some traditional algorithms such as K-means. However, spectral clustering suffers from a scalability problem in both memory use and computational time when the size of a data set is large. To perform clustering on large data sets, we investigate two representative ways of approximating the dense similarity matrix. We compare one by sparsifying the matrix with another by the Nystrom method. We then pick the strategy of sparsifying the matrix via retaining nearest neighbors and investigate its parallelization. We parallelize both memory use and computation on distributed computers. Through an empirical study on a document data set of 193,844 instances and a photo data set of 2,121,863, we show that our parallel algorithm can effectively handle large problems.},
author = {Chen, Wen-Yen and Song, Yangqiu and Bai, Hongjie and Lin, Chih-Jen and Chang, Edward Y},
doi = {10.1109/TPAMI.2010.88},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Chen et al. - 2010 - Parallel Spectral Clustering in Distributed Systems.pdf:pdf},
issn = {19393539},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {printed},
mendeley-tags = {printed},
number = {3},
pages = {1--32},
pmid = {20421667},
publisher = {Published by the IEEE Computer Society},
title = {{Parallel Spectral Clustering in Distributed Systems}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/20421667},
volume = {33},
year = {2010}
}
@inproceedings{Choi,
abstract = {The author presents a fast and scalable matrix multiplication algorithm on distributed memory concurrent computers, whose performance is independent of data distribution on processors, and call it DIMMA (distribution-independent matrix multiplication algorithm). The algorithm is based on two new ideas; it uses a modified pipelined communication scheme to overlap computation and communication effectively, and exploits the LCM block concept to obtain the maximum performance of the sequential BLAS routine in each processor when the block size is too small as well as too large. The algorithm is implemented and compared with SUMMA on the Intel Paragon computer},
author = {Choi, Jaeyoung},
booktitle = {Proceedings 11th International Parallel Processing Symposium},
doi = {10.1109/IPPS.1997.580916},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Choi - 1997 - A fast scalable universal matrix multiplication algorithm on distributed-memory concurrent computers.pdf:pdf},
isbn = {0-8186-7793-7},
keywords = {printed},
mendeley-tags = {printed},
pages = {310--314},
publisher = {IEEE Comput. Soc. Press},
title = {{A fast scalable universal matrix multiplication algorithm on distributed-memory concurrent computers}},
url = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=580916},
year = {1997}
}
@article{Choi1998,
author = {Choi, Jaeyoung},
doi = {10.1002/(SICI)1096-9128(199807)10:8<655::AID-CPE369>3.0.CO;2-O},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Choi - 1998 - A new parallel matrix multiplication algorithm on distributed-memory concurrent computers.pdf:pdf},
issn = {10403108},
journal = {Concurrency Practice and Experience},
keywords = {printed},
mendeley-tags = {printed},
number = {8},
pages = {655--670},
title = {{A new parallel matrix multiplication algorithm on distributed-memory concurrent computers}},
url = {http://doi.wiley.com/10.1002/(SICI)1096-9128(199807)10:8<655::AID-CPE369>3.0.CO;2-O},
volume = {10},
year = {1998}
}
@article{Chowdhury2010,
abstract = {We address the design of algorithms for multicores that are oblivious to machine parameters. We propose HM, a multicore model consisting of a parallel shared-memory machine with hierarchical multi-level caching, and we introduce a multicore-oblivious (MO) approach to algorithms and schedulers for HM. An MO algorithm is specified with no mention of any machine parameters, such as the number of cores, number of cache levels, cache sizes and block lengths. However, it is equipped with a small set of instructions that can be used to provide hints to the run-time scheduler on how to schedule parallel tasks. We present efficient MO algorithms for several fundamental problems including matrix transposition, FFT, sorting, the Gaussian Elimination Paradigm, list ranking, and connected components. The notion of an MO algorithm is complementary to that of a network-oblivious (NO) algorithm, recently introduced by Bilardi et al. for parallel distributed-memory machines where processors communicate point-to-point. We show that several of our MO algorithms translate into efficient NO algorithms, adding to the body of known efficient NO algorithms.},
author = {Chowdhury, Rezaul Alam and Silvestri, Francesco and Blakeley, Brandon and Ramachandran, Vijaya},
doi = {10.1109/IPDPS.2010.5470354},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Chowdhury et al. - 2010 - Oblivious algorithms for multicores and network of processors.pdf:pdf},
isbn = {9781424464425},
issn = {15302075},
journal = {2010 IEEE International Symposium on Parallel Distributed Processing IPDPS},
keywords = {algorithms,cache,gaussian elimination paradigm,list ranking,multicore,network,oblivious,printed},
mendeley-tags = {printed},
pages = {1--12},
publisher = {IEEE},
title = {{Oblivious algorithms for multicores and network of processors}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5470354},
year = {2010}
}
@misc{Chowdhury2009,
author = {Chowdhury, Rezaul Alam and Silvestri, Francesco and Blakeley, Brandon and Ramachandran, Vijaya},
citeseerx = {10.1.1.149.3304},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Chowdhury et al. - 2009 - Oblivious algorithms for multicore, network, and petascale computing.pdf:pdf},
keywords = {printed},
mendeley-tags = {printed},
title = {{Oblivious algorithms for multicore, network, and petascale computing}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.149.3304},
year = {2009}
}
@misc{Chrisochoides,
author = {Chrisochoides, Nikos and Aboelaze, Mokhtar and Houstis, Elias and Houstis, Catherine},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Chrisochoides et al. - Unknown - Scalable BLAS 2 and 3 Matrix Multiplication for Sparse Banded Matrices on Distributed Memory MIMD Machines.pdf:pdf},
title = {{Scalable BLAS 2 and 3 Matrix Multiplication for Sparse Banded Matrices on Distributed Memory MIMD Machines}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.33.9748}
}
@article{Chtchelkanova1997,
author = {Chtchelkanova, Almadena and Gunnels, John and Morrow, Greg and Overfelt, James and {Van De Geijn}, Robert A.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Chtchelkanova et al. - 1997 - Parallel Implementation of BLAS General Techniques for Level 3 BLAS.pdf:pdf},
journal = {Concurrency Practice and Experience},
keywords = {printed},
mendeley-tags = {printed},
number = {9},
pages = {837--857},
title = {{Parallel Implementation of BLAS: General Techniques for Level 3 BLAS}},
volume = {9},
year = {1997}
}
@article{Chu2007,
abstract = {We are at the beginning of the multicore era. Computers will have increasingly many cores (processors), but there is still no good programming framework for these architectures, and thus no simple and unified way for machine learning to take advantage of the potential speed up. In this paper, we develop a broadly ap- plicable parallel programming method, one that is easily applied to many different learning algorithms. Our work is in distinct contrast to the tradition in machine learning of designing (often ingenious) ways to speed up a single algorithm at a time. Specifically, we showthat algorithms that fit the Statistical Query model 15 can be written in a certain summation form, which allows them to be easily par- allelized on multicore computers. We adapt Googles map-reduce 7 paradigm to demonstrate this parallel speed up technique on a variety of learning algorithms including locally weighted linear regression (LWLR), k-means, logistic regres- sion (LR), naive Bayes (NB), SVM, ICA, PCA, gaussian discriminant analysis (GDA), EM, and backpropagation (NN). Our experimental results show basically linear speedup with an increasing number of processors.},
author = {Chu, Cheng-tao and Kim, Sang Kyun and Lin, Yi-an and Ng, Andrew Y},
editor = {Sch\"{o}lkopf, Bernhard and Platt, John C and Hoffman, Thomas},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Chu et al. - 2007 - Map-Reduce for Machine Learning on Multicore.pdf:pdf},
institution = {Stanford},
isbn = {0262195682},
issn = {10495258},
journal = {Architecture},
number = {23},
pages = {281},
publisher = {The MIT Press},
title = {{Map-Reduce for Machine Learning on Multicore}},
url = {http://www.cs.stanford.edu/people/ang//papers/nips06-mapreducemulticore.pdf},
volume = {19},
year = {2007}
}
@book{Chung1999,
abstract = {In this paper, we propose three tree-based parallel load-balancing methods, the MCSTPLB method, the BTPLB method, and the CBTPLB method, to deal with the load unbalancing problems of solution-adaptive finite element application programs. To evaluate the performance of the proposed methods, we have implemented those methods along with three mapping methods, the AE/ORB method, the AE/MC method, and the MLkP method, on an SP2 parallel machine. The experimental results show that (1) if a mapping method is used for the initial partitioning and this mapping method or a load-balancing method is used in each refinement, the execution time of an application program under a load-balancing method is always shorter than that of the mapping method. (2) The execution time of an application program under the CBTPLB method is better than that of the BTPLB method that is better than that of the MCSTPLB method. The work of this paper was partially supported by NCHC of R.O.C. under contract NCHC-86-08-021.},
address = {Berlin/Heidelberg},
author = {Chung, Yeh-Ching and Liao, Ching-Jung},
booktitle = {IEEE Transactions on Parallel and Distributed Systems},
doi = {10.1109/71.762816},
editor = {Ferreira, Alfonso and Rolim, Jos\'{e} and Simon, Horst and Teng, Shang-Hua},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Chung, Liao - 1999 - Tree-based parallel load-balancing methods for solution-adaptive finite element graphs on distributed memory multicomputers.pdf:pdf},
isbn = {3-540-64809-7},
issn = {10459219},
month = apr,
number = {4},
pages = {360--370},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
title = {{Tree-based parallel load-balancing methods for solution-adaptive finite element graphs on distributed memory multicomputers}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=762816},
volume = {10},
year = {1999}
}
@book{Clarke2009,
author = {Clarke, Bertrand and Fokou\'{e}, Ernest and Zhang, Hao Helen},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Clarke, Fokou\'{e}, Zhang - 2009 - Principles and Theory for Data Mining and Machine learning.pdf:pdf},
isbn = {978-0-387-98134-5},
issn = {0172-7397},
language = {English},
pages = {793},
publisher = {Springer},
title = {{Principles and Theory for Data Mining and Machine learning}},
year = {2009}
}
@book{Claverie2006,
abstract = {Were you always curious about biology but were afraid to sit through long hours of dense reading? Did you like the subject when you were in high school but had other plans after you graduated? Now you can explore the human genome and analyze DNA without ever leaving your desktop!
Bioinformatics For Dummies is packed with valuable information that introduces you to this exciting new discipline. This easy-to-follow guide leads you step by step through every bioinformatics task that can be done over the Internet. Forget long equations, computer-geek gibberish, and installing bulky programs that slow down your computer. You’ll be amazed at all the things you can accomplish just by logging on and following these trusty directions. You get the tools you need to:

Analyze all types of sequences
Use all types of databases
Work with DNA and protein sequences
Conduct similarity searches
Build a multiple sequence alignment
Edit and publish alignments
Visualize protein 3-D structures
Construct phylogenetic trees
This up-to-date second edition includes newly created and popular databases and Internet programs as well as multiple new genomes. It provides tips for using servers and places to seek resources to find out about what’s going on in the bioinformatics world. Bioinformatics For Dummies will show you how to get the most out of your PC and the right Web tools so you’ll be searching databases and analyzing sequences like a pro!},
author = {Claverie, Jean-Michel and Notredame, Cedric},
edition = {2},
isbn = {9780470089859},
pages = {456},
publisher = {For Dummies},
title = {{Bioinformatics for Dummies}},
year = {2006}
}
@article{Cohen1998,
abstract = {We consider the problem of predicting the nonzero structure of a product of two or more matrices. Prior knowledge of the nonzero structure can be applied to optimize memory allocation and to determine the optimal multiplication order for a chain product of sparse matrices. We adapt a recent algorithm by the author and show that the essence of the nonzero structure and hence, a near-optimal order of multiplications, can be determined in near-linear time in the number of nonzero entries, which is much smaller than the time required for the multiplications. An experimental evaluation of the algorithm demonstrates that it is practical for matrices of order {$10^3$} with {$10^4$} nonzeros (or larger). A relatively small pre-computation results in a large time saved in the computation-intensive multiplication.},
author = {Cohen, Edith},
doi = {10.1023/A:1009716300509},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Cohen - 1998 - Structure Prediction and Computation of Sparse Matrix Products.pdf:pdf},
journal = {Journal of Combinatorial Optimization},
keywords = {matrix multiplication,nonzero structure,printed,size estimation,sparse matrices,structure prediction},
mendeley-tags = {printed},
number = {4},
pages = {307--332},
title = {{Structure Prediction and Computation of Sparse Matrix Products}},
url = {http://www.springerlink.com/content/p328542122022748/},
volume = {2},
year = {1998}
}
@inproceedings{Cohen2011,
abstract = {The massive data streams observed in network monitoring, data processing and scientific studies are typically too large to store. For many applications over such data, we must obtain compact summaries of the stream. These summaries should allow accurate answering of post hoc queries with estimates which approximate the true answers over the original stream. The data often has an underlying structure which makes certain subset queries, in particular range queries, more relevant than arbitrary subsets. Applications such as access control, change detection, and heavy hitters typically involve subsets that are ranges or unions thereof. Random sampling is a natural summarization tool, being easy to implement and flexible to use. Known sampling methods are good for arbitrary queries but fail to optimize for the common case of range queries. Meanwhile, specialized summarization algorithms have been proposed for rangesum queries and related problems. These can outperform sampling giving fixed space resources, but lack its flexibility and simplicity. Particularly, their accuracy degrades when queries span multiple ranges. We define new stream sampling algorithms with a smooth and tunable trade-off between accuracy on range-sum queries and arbitrary subset-sum queries. The technical key is to relax requirements on the variance over all subsets to enable better performance on the ranges of interest. This boosts the accuracy on range queries while retaining the prime benefits of sampling, in particular flexibility and accuracy, with tail bounds guarantees. Our experimental study indicates that structure-aware summaries can drastically improve range-sum accuracy with respect to state-of-the-art stream sampling algorithms and outperform deterministic methods on range-sum queries and hierarchical heavy hitter queries.},
address = {New York, New York, USA},
author = {Cohen, Edith and Cormode, Graham and Duffield, Nick},
booktitle = {Proceedings of the ACM SIGMETRICS joint international conference on Measurement and modeling of computer systems - SIGMETRICS '11},
doi = {10.1145/1993744.1993763},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Cohen, Cormode, Duffield - 2011 - Structure-aware sampling on data streams.pdf:pdf},
isbn = {9781450308144},
keywords = {approximate query processing,data streams,printed,structure-aware sampling,varopt},
mendeley-tags = {printed},
month = jun,
pages = {197},
publisher = {ACM Press},
title = {{Structure-aware sampling on data streams}},
url = {http://dl.acm.org/citation.cfm?id=1993744.1993763},
year = {2011}
}
@article{Cohen1999,
abstract = {Many pattern recognition tasks, including estimation, classification, and the finding of similar objects, make use of linear models. The fundamental operation in such tasks is the computation of the dot product between a query vector and a large database of instance vectors. Often we are interested primarily in those instance vectors which have high dot products with the query. We present a random sampling based algorithm that enables us to identify, for any given query vector, those instance vectors which have large dot products, while avoiding explicit computation of all dot products. We provide experimental results that demonstrate considerable speedups for text retrieval tasks. Our approximate matrix multiplication algorithm is applicable to products of {$k \geq 2$} matrices and is of independent interest. Our theoretical and experimental analysis demonstrates that in many scenarios, our method dominates standard matrix multiplication.},
author = {Cohen, Edith and Lewis, David D},
doi = {10.1006/jagm.1998.0989},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Cohen, Lewis - 1999 - Approximating Matrix Multiplication for Pattern Recognition Tasks.pdf:pdf},
issn = {01966774},
journal = {Journal of Algorithms},
keywords = {printed},
mendeley-tags = {printed},
number = {2},
pages = {211--252},
title = {{Approximating Matrix Multiplication for Pattern Recognition Tasks}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0196677498909890},
volume = {30},
year = {1999}
}
@inproceedings{Cohn2005,
abstract = {We further develop the group-theoretic approach to fast matrix multiplication introduced by Cohn and Umans, and for the first time use it to derive algorithms asymptotically faster than the standard algorithm. We describe several families of wreath product groups that achieve matrix multiplication exponent less than 3, the asymptotically fastest of which achieves exponent 2.41. We present two conjectures regarding specific improvements, one combinatorial and the other algebraic. Either one would imply that the exponent of matrix multiplication is 2.},
author = {Cohn, H. and Kleinberg, R. and Szegedy, B. and Umans, C.},
booktitle = {46th Annual IEEE Symposium on Foundations of Computer Science (FOCS'05)},
doi = {10.1109/SFCS.2005.39},
isbn = {0-7695-2468-0},
keywords = {printed},
mendeley-tags = {printed},
pages = {379--388},
publisher = {IEEE},
title = {{Group-theoretic Algorithms for Matrix Multiplication}},
url = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=1530730},
year = {2005}
}
@inproceedings{Cohn,
abstract = {We develop a new, group-theoretic approach to bounding the exponent of matrix multiplication. There are two components to this approach: (1) identifying groups G that admit a certain type of embedding of matrix multiplication into the group algebra C[G], and (2) controlling the dimensions of the irreducible representations of such groups. We present machinery and examples to support (1), including a proof that certain families of groups of order n2+o(1) support n × n matrix multiplication, a necessary condition for the approach to yield exponent 2. Although we cannot yet completely achieve both (1) and (2), we hope that it may be possible, and we suggest potential routes to that result using the constructions in this paper.},
author = {Cohn, H. and Umans, C.},
booktitle = {44th Annual IEEE Symposium on Foundations of Computer Science, 2003. Proceedings.},
doi = {10.1109/SFCS.2003.1238217},
isbn = {0-7695-2040-5},
keywords = {printed},
mendeley-tags = {printed},
pages = {438--449},
publisher = {IEEE Comput. Soc},
title = {{A group-theoretic approach to fast matrix multiplication}},
url = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=1238217},
year = {2003}
}
@article{Cole2011a,
abstract = {We consider the design of efficient algorithms for a multicore computing environment with a global shared memory and p cores, each having a cache of size M, and with data organized in blocks of size B. We characterize the class of `Hierarchical Balanced Parallel (HBP)' multithreaded computations for multicores. HBP computations are similar to the hierarchical divide \& conquer algorithms considered in recent work, but have some additional features that guarantee good performance even when accounting for the cache misses due to false sharing. Most of our HBP algorithms are derived from known cache-oblivious algorithms with high parallelism, however we incorporate new techniques that reduce the effect of false-sharing. Our approach to addressing false sharing costs (or more generally, block misses) is to ensure that any task that can be stolen shares O(1) blocks with other tasks. We use a gapping technique for computations that have larger than O(1) block sharing. We also incorporate the property of limited access writes analyzed in a companion paper, and we bound the cost of accessing shared blocks on the execution stacks of tasks. We present the Priority Work Stealing (PWS) scheduler, and we establish that, given a sufficiently `tall' cache, PWS deterministically schedules several highly parallel HBP algorithms, including those for scans, matrix computations and FFT, with cache misses bounded by the sequential complexity, when accounting for both traditional cache misses and for false sharing. We also present a list ranking algorithm with almost optimal bounds. PWS schedules without using cache or block size information, and uses knowledge of processors only to the extent of determining the available locations from which tasks may be stolen; thus it schedules resource-obliviously.},
author = {Cole, Richard and Ramachandran, Vijaya},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Cole, Ramachandran - 2011 - Efficient Resource Oblivious Algorithms for Multicores.pdf:pdf},
journal = {arXiv preprint arXiv:1103.4071},
keywords = {printed},
mendeley-tags = {printed},
title = {{Efficient Resource Oblivious Algorithms for Multicores}},
url = {http://arxiv.org/abs/1103.4071},
year = {2011}
}
@article{Cole2011,
abstract = {This paper analyzes the cache miss cost of algorithms when scheduled using randomized work stealing (RWS) in a parallel environment, taking into account the effects of false sharing. First, prior analyses (due to Acar et al.) are extended to incorporate false sharing. However, to control the possible delays due to false sharing, some restrictions on the algorithms seem necessary. Accordingly, the class of Hierarchical Tree algorithms is introduced and their performance analyzed. In addition, the paper analyzes the performance of a subclass of the Hierarchical Tree Algorithms, called HBP algorithms, when scheduled using RWS; improved complexity bounds are obtained for this subclass. This class was introduced in a companion paper with efficient resource oblivious computation in mind. Finally, we note that in a scenario in which there is no false sharing the results in this paper match prior bounds for cache misses but with reduced assumptions, and in particular with no need for a bounding concave function for the cost of cache misses as in prior work by Frigo and Strumpen. This allows non-trivial cache miss bounds in this case to be obtained for a larger class of algorithms.},
archivePrefix = {arXiv},
arxivId = {1103.4142},
author = {Cole, Richard and Ramachandran, Vijaya},
eprint = {1103.4142},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Cole, Ramachandran - 2011 - Analysis of Randomized Work Stealing with False Sharing.pdf:pdf},
journal = {CoRR},
month = mar,
title = {{Analysis of Randomized Work Stealing with False Sharing}},
url = {http://arxiv.org/abs/1103.4142},
year = {2011}
}
@inproceedings{Connor2008,
abstract = {We present a parallel algorithm for k-nearest neighbor graph construction that uses Morton ordering. Experiments show that our approach has the following advantages over existing methods: 1) faster construction of k-nearest neighbor graphs in practice on multicore machines, 2) less space usage, 3) better cache efficiency, 4) ability to handle large data sets, and 5) ease of parallelization and implementation. If the point set has a bounded expansion constant, our algorithm requires one-comparison-based parallel sort of points, according to Morton order plus near-linear additional steps to output the k-nearest neighbor graph.},
author = {Connor, Michael and Kumar, Piyush},
booktitle = {IEEE Transactions on Visualization and Computer Graphics},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Connor, Kumar - 2009 - Fast construction of k-nearest neighbor graphs for point clouds.pdf:pdf;:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Connor, Kumar - 2008 - Parallel Construction of k-Nearest Neighbor Graphs for Point Clouds.pdf:pdf},
number = {4},
organization = {Department of Computer Science, Florida State University, Tallahassee, FL 32306, USA. miconnor@cs.fsu.edu},
pages = {599--608},
pmid = {20467058},
publisher = {Citeseer},
title = {{Parallel Construction of k-Nearest Neighbor Graphs for Point Clouds}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/20467058},
volume = {16},
year = {2008}
}
@article{Cormode2008,
author = {Cormode, Graham and Hadjieleftheriou, Marios},
doi = {10.1145/1454159.1454225},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Cormode, Hadjieleftheriou - 2008 - Finding frequent items in data streams.pdf:pdf;:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Cormode, Hadjieleftheriou - 2008 - Finding frequent items in data streams(2).pdf:pdf},
issn = {2150-8097},
journal = {Proceedings of the VLDB Endowment},
keywords = {printed},
mendeley-tags = {printed},
month = aug,
number = {2},
pages = {1530--1541},
title = {{Finding frequent items in data streams}},
url = {http://dl.acm.org/citation.cfm?id=1454159.1454225},
volume = {1},
year = {2008}
}
@article{Costa2010,
abstract = {Immunophenotypic characterization of B-cell chronic lymphoproliferative disorders (B-CLPD) is becoming increasingly complex due to usage of progressively larger panels of reagents and a high number of World Health Organization (WHO) entities. Typically, data analysis is performed separately for each stained aliquot of a sample; subsequently, an expert interprets the overall immunophenotypic profile (IP) of neoplastic B-cells and assigns it to specific diagnostic categories. We constructed a principal component analysis (PCA)-based tool to guide immunophenotypic classification of B-CLPD. Three reference groups of immunophenotypic data files-B-cell chronic lymphocytic leukemias (B-CLL; n = 10), mantle cell (MCL; n = 10) and follicular lymphomas (FL; n = 10)--were built. Subsequently, each of the 175 cases studied was evaluated and assigned to either one of the three reference groups or to none of them (other B-CLPD). Most cases (89\%) were correctly assigned to their corresponding WHO diagnostic group with overall positive and negative predictive values of 89 and 96\%, respectively. The efficiency of the PCA-based approach was particularly high among typical B-CLL, MCL and FL vs other B-CLPD cases. In summary, PCA-guided immunophenotypic classification of B-CLPD is a promising tool for standardized interpretation of tumor IP, their classification into well-defined entities and comprehensive evaluation of antibody panels.},
author = {Costa, E S and Pedreira, C E and Barrena, S and Lecrevisse, Q and Flores, J and Quijano, S and Almeida, J and {del Carmen Garc\'{\i}a-Macias}, M and Bottcher, S and van Dongen, Jacques J M and Orfao, A},
doi = {10.1038/leu.2010.160},
issn = {1476-5551},
journal = {Leukemia : official journal of the Leukemia Society of America, Leukemia Research Fund, U.K},
keywords = {80 and over,Adult,Aged,Antigens,Automation,B-Cell,B-Cell: immunology,B-Cell: pathology,B-Lymphocytes,B-Lymphocytes: immunology,B-Lymphocytes: pathology,CD,CD: immunology,Chronic,Female,Flow Cytometry,Flow Cytometry: methods,Follicular,Follicular: immunology,Follicular: pathology,Humans,Immunoglobulin A,Immunoglobulin A: immunology,Immunophenotyping,Immunophenotyping: methods,Leukemia,Lymphocytic,Lymphoma,Lymphoma: immunology,Lymphoma: pathology,Male,Mantle-Cell,Mantle-Cell: immunology,Mantle-Cell: pathology,Middle Aged,Predictive Value of Tests},
month = nov,
number = {11},
pages = {1927--1933},
pmid = {20844562},
title = {{Automated pattern-guided principal component analysis vs expert-based immunophenotypic classification of B-cell chronic lymphoproliferative disorders: a step forward in the standardization of clinical immunophenotyping.}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=3035971\&tool=pmcentrez\&rendertype=abstract},
volume = {24},
year = {2010}
}
@inproceedings{AdamCovington2006,
author = {Covington, Adam G. and Comstock, Charles L. G. and Levine, Andrew A. and Lockwood, John W. and Cho, Young H.},
booktitle = {FPL06},
doi = {10.1109/FPL.2006.311245},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Covington et al. - 2006 - High speed document clustering in reconfigurable hardware.pdf:pdf},
title = {{High speed document clustering in reconfigurable hardware}},
year = {2006}
}
@article{D'Alberto2009,
author = {D'Alberto, Paolo and Nicolau, Alexandru},
doi = {10.1145/1486525.1486528},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/D'Alberto, Nicolau - 2009 - Adaptive Winograd's matrix multiplications.pdf:pdf},
issn = {00983500},
journal = {ACM Transactions on Mathematical Software},
keywords = {Winograd's matrix multiplications,fast algorithms,printed},
mendeley-tags = {printed},
month = mar,
number = {1},
pages = {1--23},
title = {{Adaptive Winograd's matrix multiplications}},
url = {http://dl.acm.org/citation.cfm?id=1486525.1486528},
volume = {36},
year = {2009}
}
@article{Daggett2006,
abstract = {Protein unfolding simulations are expected to fairly reliably depict protein folding/unfolding transition states, intermediate states, and denatured states, provided explicit solvent and good simulation techniques are employed. Simulations provide a molecular framework for the interpretation of experimental protein folding studies, and they are readily amenable to validation by comparison with experiment. An understanding of these various conformational states can aid in the design of faster folding proteins, as well as more stable proteins.},
author = {Daggett, Valerie},
institution = {Department of Medicinal Chemistry, Box 357610, University of Washington, Seattle, Washington 98195-7610, USA.},
journal = {Chemical Reviews},
number = {5},
pages = {1898--1916},
pmid = {16683760},
publisher = {John Wiley \& Sons},
title = {{Protein folding-simulation.}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2291534\&tool=pmcentrez\&rendertype=abstract},
volume = {106},
year = {2006}
}
@inproceedings{Das2009,
abstract = {Frequency counting, frequent elements and top-k queries form a class of operators that are used for a wide range of stream analysis applications. In spite of the abundance of these algorithms, all known techniques for answering data stream queries are sequential in nature. The imminent ubiquity of Chip Multi-Processor (CMP) architectures requires algorithms that can exploit the parallelism of such architectures. In this paper, we first evaluate different naive techniques for intra-operator parallelism, and summarize the insights obtained from the naive techniques. Our experimental analysis of the naive designs shows that intra-operator parallelism is not straightforward and requires a complete redesign of the system. We then propose an efficient and scalable framework for parallelizing frequency counting, frequent elements and top-k queries over data streams. The proposed CoTS (Co-operative Thread Scheduling) framework is based on the principle of threads co-operating rather than contending. Our experiments on a state-of-the-art quad-core chip multiprocessor architecture and synthetic data sets demonstrate the scalability of the proposed framework, and the efficiency is demonstrated by peak processing throughput of more than 60 million elements per second.},
author = {Das, Sudipto and Antony, Shyam and Agrawal, Divyakant and Abbadi, Amr El},
booktitle = {2009 IEEE 25th International Conference on Data Engineering},
doi = {10.1109/ICDE.2009.231},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Das et al. - 2009 - CoTS A Scalable Framework for Parallelizing Frequency Counting over Data Streams.pdf:pdf},
isbn = {978-1-4244-3422-0},
issn = {1084-4627},
month = mar,
pages = {1323--1326},
publisher = {IEEE},
title = {{CoTS: A Scalable Framework for Parallelizing Frequency Counting over Data Streams}},
url = {http://www.computer.org/portal/web/csdl/doi/10.1109/ICDE.2009.231},
year = {2009}
}
@article{Demaine2002,
abstract = {A recent direction in the design of cache-efficient and disk-efficient algorithms and data structures is the notion of cache obliviousness, introduced by Frigo, Leiserson, Prokop, and Ramachandran in 1999. Cache-oblivious algorithms perform well on a multilevel memory hierarchy without knowing any parameters of the hierarchy, only knowing the existence of a hierarchy. Equivalently, a single cache-oblivious algorithm is efficient on all memory hierarchies simultaneously. While such results might seem impossible, a recent body of work has developed cache-oblivious algorithms and data structures that perform as well or nearly as well as standard external-memory structures which require knowledge of the cache/memory size and block transfer size. Here we describe several of these results with the intent of elucidating the techniques behind their design. Perhaps the most exciting of these results are the data structures, which form general building blocks immediately leading to several algorithmic results.},
author = {Demaine, Erik D},
doi = {10.1007/s11227-007-0106-8},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Demaine - 2002 - Cache-Oblivious Algorithms and Data Structures.pdf:pdf},
institution = {BRICS, University of Aarhus, Denmark},
journal = {Lecture Notes from the EEF Summer School on Massive Data Sets},
keywords = {printed},
mendeley-tags = {printed},
number = {4},
pages = {1--249},
publisher = {Citeseer},
title = {{Cache-Oblivious Algorithms and Data Structures}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.95.1230\&amp;rep=rep1\&amp;type=pdf},
volume = {8},
year = {2002}
}
@article{Demaine2002a,
abstract = {We consider a router on the Internet analyzing the statistical properties of a TCP/IP packet stream. A fundamental difficulty with measuring traffic behavior on the Internet is that there is simply too much data to be recorded for later analysis, on the order of gigabytes a second. As a result, network routers can collect only relatively few statistics about the data. The central problem addressed here is to use the limited memory of routers to determine essential features of the network traffic stream. A particularly difficult and representative subproblem is to determine the top k categories to which the most packets belong, for a desired value of k and for a given notion of categorization such as the destination IP address. We present an algorithm that deterministically finds (in particular) all categories having a frequency above 1/(m + 1) using m counters, which we prove is best possible in the worst case. We also present a sampling-based algorithm for the case that packet categories follow an arbitrary distribution, but their order over time is permuted uniformly at random. Under this model, our algorithm identifies flows above a frequency threshold of roughly 1 / √ nm with high probability, where m is the number of counters and n is the number of packets observed. This guarantee is not far off from the ideal of identifying all flows (probability 1/n), and we prove that it is best possible up to a logarithmic factor. We show that the algorithm ranks the identified flows according to frequency within any desired constant factor of accuracy},
author = {Demaine, Erik D. and L{\'o}pez-Ortiz, Alejandro and Munro, J. Ian},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Demaine, L\'{o}pez-ortiz, Munro - 2002 - Frequency estimation of internet packet streams with limited space.pdf:pdf},
journal = {ESA '02 Proceedings of the 10th Annual European Symposium on Algorithms},
pages = {348--360},
title = {{Frequency estimation of internet packet streams with limited space}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.100.7878},
year = {2002}
}
@article{Dey1999,
author = {Dey, Tamal K and Kumar, Piyush},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Dey, Kumar - 1999 - A Simple Provable Algorithm for Curve Reconstruction.pdf:pdf},
journal = {Proceedings of the 10th Annual ACM-SIAM Symposium on Discrete Algorithms},
pages = {3--4},
publisher = {Society for Industrial and Applied Mathematics},
title = {{A Simple Provable Algorithm for Curve Reconstruction}},
url = {http://portal.acm.org/citation.cfm?id=314500.315073},
year = {1999}
}
@inproceedings{Dharwez2011,
address = {New York, New York, USA},
author = {Dharwez, S. S. and Karpagam, B.},
booktitle = {Proceedings of the International Conference \& Workshop on Emerging Trends in Technology - ICWET '11},
doi = {10.1145/1980022.1980119},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Dharwez, Karpagam - 2011 - KS ring theoretic approach for matrix multiplication.pdf:pdf},
isbn = {9781450304498},
keywords = {eigenvalues,matrix multiplication,ring,time complexity,trace},
month = feb,
pages = {455},
publisher = {ACM Press},
title = {{KS ring theoretic approach for matrix multiplication}},
url = {http://dl.acm.org/citation.cfm?id=1980022.1980119},
year = {2011}
}
@article{Dhillon2001,
abstract = {Both document clustering and word clustering are well studied problems. Most existing algorithms cluster documents and words separately but not simultaneously. In this paper we present the novel idea of modeling the document collection as a bipartite graph between documents and words, using which the simultaneous clustering problem can be posed as a bipartite graph partitioning problem. To solve the partitioning problem, we use a new spectral co-clustering algorithm that uses the second left and right singular vectors of an appropriately scaled word-document matrix to yield good bipartitionings. The spectral algorithm enjoys some optimality properties; it can be shown that the singular vectors solve a real relaxation to the NP-complete graph bipartitioning problem. We present experimental results to verify that the resulting co-clustering algorithm works well in practice.},
author = {Dhillon, Inderjit S},
doi = {10.1145/502512.502550},
editor = {Provost, F and Srikant, R and Schkolnick, M and Lee, D},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Dhillon - 2001 - Co-clustering documents and words using bipartite spectral graph partitioning.pdf:pdf},
institution = {UT Austin CS Dept.},
isbn = {158113391X},
journal = {Proceedings of the 7th ACM SIGKDD international conference on Knowledge discovery and data mining KDD 01},
pages = {269--274},
publisher = {ACM Press},
title = {{Co-clustering documents and words using bipartite spectral graph partitioning}},
url = {http://portal.acm.org/citation.cfm?doid=502512.502550},
year = {2001}
}
@article{Dhillon1999,
abstract = {To cluster increasingly massive data sets that are common today in data and text mining, we propose a parallel implementation of the k-means clustering algorithm based on the message passing model. The proposed algorithm exploits the inherent data-parallelism in the k-means algorithm. We analytically show that the speedup and the scaleup of our algorithm approach the optimal as the number of data points increases. We implemented our algorithm on an IBM POWERparallel SP2 with a maximum of 16 nodes. On typical test data sets, we observe nearly linear relative speedups, for example, 15.62 on 16 nodes, and essentially linear scaleup in the size of the data set and in the number of clusters desired. For a 2 gigabyte test data set, our implementation drives the 16 node SP2 at more than 1,8 gigaflops.},
author = {Dhillon, Inderjit S and Modha, Dharmendra S},
chapter = {13},
doi = {10.1007/3-540-46502-2},
editor = {Zaki, Mohammed J and Ho, Ching-Tien},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Dhillon, Modha - 1999 - A data-clustering algorithm on distributed memory multiprocessors.pdf:pdf},
isbn = {3540671943},
journal = {Large-Scale Parallel Data Mining},
keywords = {printed},
mendeley-tags = {printed},
number = {802},
pages = {245--260},
publisher = {Springer},
series = {Lecture Notes in Artificial Intelligence},
title = {{A data-clustering algorithm on distributed memory multiprocessors}},
url = {http://www.springerlink.com/index/87XXPDP6KLXA0LX5.pdf},
volume = {1759},
year = {1999}
}
@article{Domeniconi2004,
abstract = {Clustering suffers from the curse of dimensionality, and similarity functions that use all input features with equal relevance may not be effective. We introduce an algorithm that discovers clusters in subspaces spanned by different combinations of dimensions via local weightings of features. This approach avoids the risk of loss of information encountered in global dimensionality reduction techniques, and does not assume any data distribution model. Our method associates to each cluster a weight vector, whose values capture the relevance of features within the corresponding cluster. We experimentally demonstrate the gain in performance our method achieves, using both synthetic and real data sets. In particular, our results show the feasibility of the proposed technique to perform simultaneous clustering of genes and conditions in microarray data.},
author = {Domeniconi, Carlotta and Papadopoulos, Dimitris and Liu, Huan},
doi = {10.1145/1007730.1007731},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Domeniconi, Papadopoulos, Liu - 2004 - Subspace clustering for high dimensional data.pdf:pdf},
issn = {19310145},
journal = {ACM SIGKDD Explorations Newsletter},
number = {1},
pages = {90--105},
title = {{Subspace clustering for high dimensional data}},
url = {http://portal.acm.org/citation.cfm?doid=1007730.1007731},
volume = {6},
year = {2004}
}
@inproceedings{Dongarra,
author = {Dongarra, Jack and Walker, D. W.},
booktitle = {Proceedings of 8th International Parallel Processing Symposium},
doi = {10.1109/IPPS.1994.288214},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Dongarra, Walker - 1994 - The design of scalable software libraries for distributed memory concurrent computers.pdf:pdf},
isbn = {0-8186-5602-6},
pages = {792--799},
publisher = {IEEE Comput. Soc. Press},
title = {{The design of scalable software libraries for distributed memory concurrent computers}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=288214},
year = {1994}
}
@article{Drineas1999,
author = {Drineas, P. and Frieze, Alan and Kannan, Ravi and Vempala, Santosh and Vinay, V.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Drineas et al. - 1999 - Clustering in large graphs and matrices.pdf:pdf},
isbn = {0-89871-434-6},
journal = {Proceedings of the 10th Annual ACM-SIAM Symposium on Discrete Algorithms},
month = jan,
pages = {291--299},
title = {{Clustering in large graphs and matrices}},
url = {http://dl.acm.org/citation.cfm?id=314500.314576},
year = {1999}
}
@article{Drineas,
author = {Drineas, Petros and Frieze, Alan and Kannan, Ravi and Vempala, Santosh and Vinay, V.},
doi = {10.1023/B:MACH.0000033113.59016.96},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Drineas et al. - 2004 - Clustering Large Graphs via the Singular Value Decomposition.pdf:pdf},
issn = {0885-6125},
journal = {Machine Learning},
keywords = {Singular Value Decomposition,approximation,k-means clustering,printed,randomized algorithms},
mendeley-tags = {printed},
month = jul,
number = {1-3},
pages = {9--33},
title = {{Clustering Large Graphs via the Singular Value Decomposition}},
url = {http://www.springerlink.com/openurl.asp?id=doi:10.1023/B:MACH.0000033113.59016.96},
volume = {56},
year = {2004}
}
@inproceedings{Drineasa,
author = {Drineas, Petros and Kannan, Ravi},
booktitle = {Proceedings 42nd IEEE Symposium on Foundations of Computer Science (FOCS 2001)},
doi = {10.1109/SFCS.2001.959921},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Drineas, Kannan - 2001 - Fast Monte-Carlo algorithms for approximate matrix multiplication.pdf:pdf},
isbn = {0-7695-1116-3},
keywords = {printed},
mendeley-tags = {printed},
pages = {452--459},
publisher = {IEEE Comput. Soc},
title = {{Fast Monte-Carlo algorithms for approximate matrix multiplication}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=959921},
year = {2001}
}
@inproceedings{Drineas2003,
address = {Philadelphia, PA, USA},
author = {Drineas, Petros and Kannan, Ravi},
booktitle = {Proceedings of the Fourteenth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA '03)},
doi = {10.1145/644108.644147},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Drineas, Kannan - 2003 - Pass efficient algorithms for approximating large matrices.pdf:pdf},
isbn = {0898715385},
keywords = {printed},
mendeley-tags = {printed},
pages = {223--232},
publisher = {Society for Industrial and Applied Mathematics},
title = {{Pass efficient algorithms for approximating large matrices}},
url = {http://portal.acm.org/citation.cfm?id=644147},
year = {2003}
}
@article{Drineas2006b,
author = {Drineas, Petros and Kannan, Ravi and Mahoney, Michael W},
doi = {10.1137/S0097539704442702},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Drineas, Kannan, Mahoney - 2006 - Fast Monte Carlo Algorithms for Matrices III Computing a Compressed Approximate Matrix Decomposition.pdf:pdf},
issn = {00975397},
journal = {SIAM Journal on Computing},
keywords = {printed},
mendeley-tags = {printed},
number = {1},
pages = {184--206},
publisher = {Society for Industrial and Applied Mathematics},
title = {{Fast Monte Carlo Algorithms for Matrices III: Computing a Compressed Approximate Matrix Decomposition}},
url = {http://link.aip.org/link/SMJCAT/v36/i1/p184/s1\&Agg=doi},
volume = {36},
year = {2006}
}
@article{Drineas2006a,
abstract = {In many applications, the data consist of (or may be naturally formulated as) an m times n matrix A. It is often of interest to find a low-rank approximation to A, i.e., an approximation D to the matrix A of rank not greater than a specified rank k, where k is much smaller than m and n. Methods such as the singular value decomposition (SVD) may be used to find an approximation to A which is the best in a well-defined sense. These methods require memory and time which are superlinear in m and n; for many applications in which the data sets are very large this is prohibitive. Two simple and intuitive algorithms are presented which, when given an m times n matrix A, compute a description of a low-rank approximation D to A, and which are qualitatively faster than the SVD. Both algorithms have provable bounds for the error matrix A-D . For any matrix X, let XbackslashF and Xbackslash2 denote its Frobenius norm and its spectral norm, respectively. In the first algorithm, c columns of A are randomly chosen. If the m times c matrix C consists of those c columns of A (after appropriate rescaling), then it is shown that from C TC approximations to the top singular values and corresponding singular vectors may be computed. From the computed singular vectors a description D of the matrix A may be computed such that mathrmrank(D ) le k and such that backslashleftbackslashA-D backslashrightbackslashbackslashxi 2 backslashle k backslashleftbackslashA-Dbackslashrightbackslashbackslashxi 2 + poly(k,1/c) holds with high probability for both xi = 2,F. This algorithm may be implemented without storing the matrix A in random access memory (RAM), provided it can make two passes over the matrix stored in external memory and use O(cm+c 2) additional RAM. The second algorithm is similar except that it further approximates the matrix C by randomly sampling r rows of C to form a r times c matrix W. 
Thus, it has additional error, but it can be implemented in three passes over the matrix using only constant additional RAM. To achieve an additional error (beyond the best rank k approximation) that is at most both algorithms take time which is polynomial in k, 1/epsilon, and log(1/delta), where delta>0 is a failure probability; the first takes time linear in mboxmax(m,n) and the second takes time independent of m and n. Our bounds improve previously published results with respect to the rank parameter k for both the Frobenius and spectral norms. In addition, the proofs for the error bounds use a novel method that makes important use of matrix perturbation theory. The probability distribution over columns of A and the rescaling are crucial features of the algorithms which must be chosen judiciously.},
author = {Drineas, Petros and Kannan, Ravi and Mahoney, Michael W},
doi = {10.1137/S0097539704442696},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Drineas, Kannan, Mahoney - 2006 - Fast Monte Carlo Algorithms for Matrices II Computing a Low-Rank Approximation to a Matrix.pdf:pdf},
institution = {Yale Univ.},
issn = {00975397},
journal = {SIAM Journal on Computing},
keywords = {massive data sets,monte carlo methods,printed,randomized algorithms,singular value},
mendeley-tags = {printed},
number = {1},
pages = {158--183},
publisher = {Society for Industrial and Applied Mathematics},
title = {{Fast Monte Carlo Algorithms for Matrices II: Computing a Low-Rank Approximation to a Matrix}},
url = {http://link.aip.org/link/SMJCAT/v36/i1/p158/s1\&Agg=doi},
volume = {36},
year = {2006}
}
@article{Drineas2006,
abstract = {Motivated by applications in which the data may be formulated as a matrix, we consider algorithms for several common linear algebra problems. These algorithms make more efficient use of computational resources, such as the computation time, random access memory (RAM), and the number of passes over the data, than do previously known algorithms for these problems. In this paper, we devise two algorithms for the matrix multiplication problem. Suppose A and B (which are mtimes n and ntimes p, respectively) are the two input matrices. In our main algorithm, we perform c independent trials, where in each trial we randomly sample an element of 1,2,ldots, n with an appropriate probability distribution backslash cal P on 1,2,ldots, n. We form an mtimes c matrix C consisting of the sampled columns of A, each scaled appropriately, and we form a ctimes n matrix R using the corresponding rows of B, again scaled appropriately. The choice of backslash cal P and the column and row scaling are crucial features of the algorithm. When these are chosen judiciously, we show that CR is a good approximation to AB. More precisely, we show that backslashleftbackslashAB-CRbackslashrightbackslashF = /backslashsqrt c) , where cdotF denotes the Frobenius norm, i.e., This algorithm can be implemented without storing the matrices A and B in RAM, provided it can make two passes over the matrices stored in external memory and use O(c(m+n+p)) additional RAM to construct C and R. We then present a second matrix multiplication algorithm which is similar in spirit to our main algorithm. In addition, we present a model (the pass-efficient model) in which the efficiency of these and other approximate matrix algorithms may be studied and which we argue is well suited to many applications involving massive data sets. In this model, the scarce computational resources are the number of passes over the data and the additional space and time required by the algorithm. 
The input matrices may be presented in any order of the entries (and not just row or column order), as is the case in many applications where, e.g., the data has been written in by multiple agents. In addition, the input matrices may be presented in a sparse representation, where only the nonzero entries are written.},
author = {Drineas, Petros and Kannan, Ravi and Mahoney, Michael W},
doi = {10.1137/S0097539704442684},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Drineas, Kannan, Mahoney - 2006 - Fast Monte Carlo Algorithms for Matrices I Approximating Matrix Multiplication.pdf:pdf},
institution = {Yale},
issn = {00975397},
journal = {SIAM Journal on Computing},
keywords = {printed},
mendeley-tags = {printed},
number = {1},
pages = {132--157},
publisher = {Society for Industrial and Applied Mathematics},
title = {{Fast Monte Carlo Algorithms for Matrices I: Approximating Matrix Multiplication}},
url = {http://link.aip.org/link/SMJCAT/v36/i1/p132/s1\&Agg=doi},
volume = {36},
year = {2006}
}
@article{Echenique2007,
abstract = {The prediction of the three-dimensional native structure of proteins from the knowledge of their amino acid sequence, known as the protein folding problem, is one of the most important yet unsolved issues of modern science. Since the conformational behaviour of flexible molecules is nothing more than a complex physical problem, increasingly more physicists are moving into the study of protein systems, bringing with them powerful mathematical and computational tools, as well as the sharp intuition and deep images inherent to the physics discipline. This work attempts to facilitate the first steps of such a transition. In order to achieve this goal, we provide an exhaustive account of the reasons underlying the protein folding problem enormous relevance and summarize the present-day status of the methods aimed to solving it. We also provide an introduction to the particular structure of these biological heteropolymers, and we physically define the problem stating the assumptions behind this (commonly implicit) definition. Finally, we review the 'special flavor' of statistical mechanics that is typically used to study the astronomically large phase spaces of macromolecules. Throughout the whole work, much material that is found scattered in the literature has been put together here to improve comprehension and to serve as a handy reference.},
archiveprefix = {arXiv},
author = {Echenique, Pablo},
eprint = {0705.1845},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Echenique - 2007 - Introduction to protein folding for physicists.pdf:pdf},
journal = {Contemporary Physics},
number = {2},
pages = {53},
publisher = {Taylor \& Francis},
title = {{Introduction to protein folding for physicists}},
url = {http://arxiv.org/abs/0705.1845},
volume = {48},
year = {2007}
}
@inproceedings{EdithCohen1996,
address = {Berlin, Heidelberg},
author = {Cohen, Edith},
booktitle = {Integer Programming and Combinatorial Optimization},
doi = {10.1007/3-540-61310-2\_17},
editor = {Cunningham, William H. and McCormick, S. Thomas and Queyranne, Maurice},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Edith Cohen - 1996 - On optimizing multiplications of sparse matrices.pdf:pdf},
isbn = {978-3-540-61310-7},
keywords = {printed},
mendeley-tags = {printed},
pages = {219--233},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{On optimizing multiplications of sparse matrices}},
url = {http://www.springerlink.com/index/10.1007/3-540-61310-2},
volume = {1084},
year = {1996}
}
@proceedings{Eiter2005,
address = {Berlin, Heidelberg},
doi = {10.1007/b104421},
editor = {Eiter, Thomas and Libkin, Leonid},
isbn = {978-3-540-24288-8},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Database Theory -- ICDT 2005}},
url = {http://www.springerlink.com/content/tp581qc7ax7eqgt3/},
volume = {3363},
year = {2005}
}
@inproceedings{Ekanayake2009,
author = {Ekanayake, Jaliya and Gunarathne, Thilina and Fox, Geoffrey and Balkir, Atilla Soner and Poulain, Christophe and Araujo, Nelson and Barga, Roger},
title = {{DryadLINQ for Scientific Analyses}},
booktitle = {2009 5th IEEE International Conference on e-Science},
pages = {329--336},
month = dec,
year = {2009},
publisher = {IEEE},
doi = {10.1109/e-Science.2009.53},
isbn = {978-1-4244-5340-5},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5380850},
abstract = {Applying high level parallel runtimes to data/compute intensive applications is becoming increasingly common. The simplicity of the MapReduce programming model and the availability of open source MapReduce runtimes such as Hadoop, are attracting more users to the MapReduce programming model. Microsoft has released DryadLINQ for academic use, allowing users to experience a new programming model and a runtime that is capable of performing large scale data/compute intensive analyses. In this paper, we present our experience in applying DryadLINQ for a series of scientific data analysis applications, identify their mapping to the DryadLINQ programming model, and compare their performances with Hadoop implementations of the same applications.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ekanayake et al. - 2009 - DryadLINQ for Scientific Analyses.pdf:pdf}
}
@inproceedings{Ekanayake2010a,
address = {New York, New York, USA},
author = {Ekanayake, Jaliya and Li, Hui and Zhang, Bingjing and Gunarathne, Thilina and Bae, Seung-Hee and Qiu, Judy and Fox, Geoffrey},
booktitle = {Proceedings of the 19th ACM International Symposium on High Performance Distributed Computing - HPDC '10},
doi = {10.1145/1851476.1851593},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ekanayake et al. - 2010 - Twister.pdf:pdf},
isbn = {9781605589428},
keywords = {MapReduce,cloud technologies,iterative algorithms,printed},
mendeley-tags = {printed},
month = jun,
pages = {810--818},
publisher = {ACM Press},
title = {{Twister: A Runtime for Iterative MapReduce}},
url = {http://dl.acm.org/citation.cfm?id=1851476.1851593},
year = {2010}
}
@inproceedings{Ekanayake2008,
author = {Ekanayake, Jaliya and Pallickara, Shrideep and Fox, Geoffrey},
title = {{MapReduce for Data Intensive Scientific Analyses}},
booktitle = {2008 IEEE Fourth International Conference on eScience},
pages = {277--284},
month = dec,
year = {2008},
publisher = {IEEE},
doi = {10.1109/eScience.2008.59},
isbn = {978-1-4244-3380-3},
url = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=4736768},
abstract = {Most scientific data analyses comprise analyzing voluminous data collected from various instruments. Efficient parallel/concurrent algorithms and frameworks are the key to meeting the scalability and performance requirements entailed in such scientific data analyses. The recently introduced MapReduce technique has gained a lot of attention from the scientific community for its applicability in large parallel data analyses. Although there are many evaluations of the MapReduce technique using large textual data collections, there have been only a few evaluations for scientific data analyses. The goals of this paper are twofold. First, we present our experience in applying the MapReduce technique for two scientific data analyses: (i) high energy physics data analyses; (ii) K-means clustering. Second, we present CGL-MapReduce, a streaming-based MapReduce implementation and compare its performance with Hadoop.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ekanayake, Pallickara, Fox - 2008 - MapReduce for Data Intensive Scientific Analyses.pdf:pdf}
}
@incollection{Ekanayake2010,
abstract = {We present our experiences in applying, developing, and evaluating cloud and cloud technologies. First, we present our experience in applying Hadoop and DryadLINQ to a series of data/compute intensive applications and then compare them with a novel MapReduce runtime developed by us, named CGL-MapReduce, and MPI. Preliminary applications are developed for particle physics, bioinformatics, clustering, and matrix multiplication. We identify the basic execution units of the MapReduce programming model and categorize the runtimes according to their characteristics. MPI versions of the applications are used where the contrast in performance needs to be highlighted. We discuss the application structure and their mapping to parallel architectures of different types, and look at the performance of these applications. Next, we present a performance analysis of MPI parallel applications on virtualized resources.},
author = {Ekanayake, Jaliya and Qiu, Xiaohong and Gunarathne, Thilina and Beason, Scott and Fox, Geoffrey},
booktitle = {Cloud Computing and Software Services: Theory and Techniques},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ekanayake et al. - 2010 - High Performance Parallel Computing with Cloud and Cloud Technologies.pdf:pdf},
internal-note = {Removed DOI 10.1186/1471-2148-11-350, ISSN 14712148, journal={Technology}, volume/number, and the biomedcentral URL: they belong to an unrelated BMC Evolutionary Biology article (Mendeley merge artifact). Booktitle inferred from ISBN 9781439803158 (CRC Press volume); verify.},
isbn = {9781439803158},
keywords = {printed},
mendeley-tags = {printed},
pages = {1--39},
publisher = {CRC Press (Taylor and Francis)},
title = {{High Performance Parallel Computing with Cloud and Cloud Technologies}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.148.6044},
year = {2010}
}
@article{El-qawasmeh2004,
abstract = {A quick matrix multiplication algorithm is presented and evaluated on a cluster of networked workstations consisting of Pentium hosts connected together by Ethernet segments. The obtained results confirm the feasibility of using networked workstations to provide fast and low cost solutions to many computationally intensive applications such as large linear algebraic systems. The paper also presents and verifies an accurate timing model to predict the performance of the proposed algorithm on arbitrary clusters of workstations. Through this model the viability of the proposed algorithm can be revealed without the extra effort that would be needed to carry out real testing.},
author = {El-qawasmeh, Eyas and Abu-ghazaleh, Nayef},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/El-qawasmeh, Abu-ghazaleh - 2004 - Quick Matrix Multiplication on Clusters of Workstations.pdf:pdf},
journal = {Informatica},
number = {2},
pages = {213--218},
title = {{Quick Matrix Multiplication on Clusters of Workstations}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.114.8069},
volume = {15},
year = {2004}
}
@article{AnithaElavarasi2011,
abstract = {Learning is the process of generating useful information from a huge volume of data. Learning can be classified as supervised learning and unsupervised learning. Clustering is a kind of unsupervised learning. A pattern representing a common behavior or characteristics that exist among each item can be generated. This paper gives an overview of different partition clustering algorithm. It describes about the general working behavior, the methodologies followed on these approaches and the parameters which affects the performance of these algorithms.},
author = {Elavarasi, Anitha S. and Akilandeswari, J.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Elavarasi, Akilandeswari - 2011 - A Survey of Partition Clustering Algorithms.pdf:pdf},
internal-note = {Former doi field held 10.1.1.188.7629, which is a CiteSeerX document id, not a DOI; removed.},
journal = {International Journal of Enterprise Computing and Business Systems},
keywords = {Clustering,Supervised Learning,Unsupervised Learning},
number = {1},
pages = {14},
title = {{A Survey of Partition Clustering Algorithms}},
url = {http://www.ijecbs.com},
volume = {1},
year = {2011}
}
@article{Enright2002a,
abstract = {Detection of protein families in large databases is one of the principal research objectives in structural and functional genomics. Protein family classification can significantly contribute to the delineation of functional diversity of homologous proteins, the prediction of function based on domain architecture or the presence of sequence motifs as well as comparative genomics, providing valuable evolutionary insights. We present a novel approach called TRIBE-MCL for rapid and accurate clustering of protein sequences into families. The method relies on the Markov cluster (MCL) algorithm for the assignment of proteins into families based on precomputed sequence similarity information. This novel approach does not suffer from the problems that normally hinder other protein sequence clustering algorithms, such as the presence of multi-domain proteins, promiscuous domains and fragmented proteins. The method has been rigorously tested and validated on a number of very large databases, including SwissProt, InterPro, SCOP and the draft human genome. Our results indicate that the method is ideally suited to the rapid and accurate detection of protein families on a large scale. The method has been used to detect and categorise protein families within the draft human genome and the resulting families have been used to annotate a large proportion of human proteins.},
annote = {Relevant references: Van Dongen (2000), A new cluster algorithm for graphs, Report INS-R0010; Van Dongen (2000), Performance criteria for graph clustering and Markov cluster experiments.},
author = {Enright, Anton James and {Van Dongen}, Stijn and Ouzounis, Christos A.},
doi = {10.1093/nar/30.7.1575},
issn = {13624962},
journal = {Nucleic Acids Research},
keywords = {printed},
mendeley-tags = {printed},
month = apr,
number = {7},
pages = {1575--1584},
title = {{An efficient algorithm for large-scale detection of protein families}},
url = {http://www.nar.oupjournals.org/cgi/doi/10.1093/nar/30.7.1575},
volume = {30},
year = {2002}
}
@inproceedings{Fatahalian2004,
author = {Fatahalian, Kayvon and Sugerman, Jeremy and Hanrahan, Pat},
booktitle = {Proceedings of the ACM SIGGRAPH/EUROGRAPHICS Conference on Graphics Hardware (HWWS '04)},
doi = {10.1145/1058129.1058148},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Fatahalian, Sugerman, Hanrahan - 2004 - Understanding the efficiency of GPU algorithms for matrix-matrix multiplication.pdf:pdf},
internal-note = {Removed the imported abstract: it described an arithmetic-cognition study (addition/subtraction inversion problems), clearly attached to the wrong paper. Also dropped junk volume/number fields holding the year.},
isbn = {3905673150},
issn = {17273471},
keywords = {printed},
mendeley-tags = {printed},
pages = {133--137},
publisher = {ACM Press},
title = {{Understanding the efficiency of GPU algorithms for matrix-matrix multiplication}},
url = {http://portal.acm.org/citation.cfm?doid=1058129.1058148},
year = {2004}
}
@inproceedings{Fei2008,
author = {Fei, Hongliang and Huan, Jun},
booktitle = {Proceedings of the 17th ACM Conference on Information and Knowledge Management (CIKM '08)},
doi = {10.1145/1458082.1458212},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Fei, Huan - 2008 - Structure feature selection for graph classification.pdf:pdf},
isbn = {9781595939913},
keywords = {classification,data mining,feature selection},
pages = {991},
publisher = {ACM Press},
title = {{Structure feature selection for graph classification}},
url = {http://portal.acm.org/citation.cfm?doid=1458082.1458212},
year = {2008}
}
@article{Feigenbaum2002,
author = {Feigenbaum, Joan and Kannan, Sampath and Strauss, Martin J. and Viswanathan, Mahesh},
doi = {10.1137/S0097539799361701},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Feigenbaum et al. - 2002 - An Approximate L1-Difference Algorithm for Massive Data Streams.pdf:pdf},
issn = {00975397},
journal = {SIAM Journal on Computing},
keywords = {distance approximation,streaming algorithms},
month = jan,
number = {1},
pages = {131--151},
title = {{An Approximate L1-Difference Algorithm for Massive Data Streams}},
url = {http://dl.acm.org/citation.cfm?id=589343.592594},
volume = {32},
year = {2002}
}
@proceedings{Festa2010,
address = {Berlin, Heidelberg},
doi = {10.1007/978-3-642-13193-6},
editor = {Festa, Paola},
isbn = {978-3-642-13192-9},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Experimental Algorithms}},
url = {http://www.springerlink.com/index/10.1007/978-3-642-13193-6},
volume = {6049},
year = {2010}
}
@inproceedings{Fiduccia1971,
abstract = {This paper deals with three aspects of algebraic complexity. The first section is concerned with lower bounds on the number of operations required to compute several functions. Several theorems are presented and their proofs sketched. The second section deals with relationships among the complexities of several sets of functions. In the third section, several matrices of general interest are examined and upper bounds on the number of operations required to multiply by them are constructively derived.},
author = {Fiduccia, Charles M},
booktitle = {Proceedings of the 3rd Annual ACM Symposium on Theory of Computing (STOC '71)},
doi = {10.1145/800157.805037},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Fiduccia - 1971 - Fast matrix multiplication.pdf:pdf},
month = may,
pages = {45--49},
publisher = {ACM Press},
title = {{Fast matrix multiplication}},
url = {http://portal.acm.org/citation.cfm?doid=800157.805037},
year = {1971}
}
@techreport{Fox2008,
abstract = {Abstract. We describe a suite of data mining tools that cover clustering, information retrieval and the mapping of high dimensional data to low dimensions for visualization. Preliminary applications are given to particle physics, bioinformatics and medical informatics. The data vary in ...},
author = {Fox, Geoffrey and Bae, Seung-hee and Ekanayake, Jaliya and Qiu, Xiaohong},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Fox et al. - 2008 - Parallel Data Mining from Multicore to Cloudy Grids.pdf:pdf},
institution = {Indiana University},
internal-note = {Removed journal={Science}, pages={2008--2008}, publisher={Citeseer}, and a Google-Scholar search URL -- all import artifacts. Institution inferred from author affiliation; verify the published venue.},
keywords = {ccr,clustering,mapreduce,mpi,multidimensional,performance},
title = {{Parallel Data Mining from Multicore to Cloudy Grids}},
year = {2008}
}
@article{Fraenkel1993,
abstract = {It is believed that the native folded three-dimensional conformation of a protein is its lowest free energy state, or one of its lowest. It is shown here that both a two- and three-dimensional mathematical model describing the folding process as a free energy minimization problem is NP-hard. This means that the problem belongs to a large set of computational problems, assumed to be very hard ("conditionally intractable"). Some of the possible ramifications of this result are speculated upon.},
author = {Fraenkel, A S},
institution = {Department of Mathematics, University of Pennsylvania, Philadelphia 19104-6395.},
journal = {Bulletin of Mathematical Biology},
number = {6},
pages = {1199--1210},
pmid = {8281132},
publisher = {Springer},
title = {{Complexity of protein folding}},
url = {http://www.springerlink.com/index/N111140117672R08.pdf},
volume = {55},
year = {1993}
}
@phdthesis{Frigo1999a,
author = {Frigo, Matteo},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Frigo - 1999 - Portable High-Performance Programs.pdf:pdf},
pages = {169},
school = {Massachusetts Institute of Technology},
title = {{Portable High-Performance Programs}},
type = {{PhD} thesis},
year = {1999}
}
@inproceedings{Frigo1999,
abstract = {This paper presents asymptotically optimal algorithms for rectangular matrix transpose, FFT, and sorting on computers with multiple levels of caching. Unlike previous optimal algorithms, these algorithms are cache oblivious: no variables dependent on hardware parameters, such as cache size and cache-line length, need to be tuned to achieve optimality. Nevertheless, these algorithms use an optimal amount of work and move data optimally among multiple levels of cache. For a cache with size Z and cache-line length L where Z=$\Omega$(L2 the number of cache misses for an mn matrix transpose is $\Theta$(1+mn/L). The number of cache misses for either an n-point FFT or the sorting of n numbers is $\Theta$(1+(n/L)(1+logZn)). We also give an $\Theta$(mnp)-work algorithm to multiply an mn matrix by an np matrix that incurs $\Theta$(1+(mn+np+mp)/L+mnp/LZ) cache faults. We introduce an ideal-cache model to analyze our algorithms. We prove that an optimal cache-oblivious algorithm designed for two levels of memory is also optimal for multiple levels and that the assumption of optimal replacement in the ideal-cache model. Can be simulated efficiently by LRU replacement. We also provide preliminary empirical results on the effectiveness of cache-oblivious algorithms in practice},
author = {Frigo, Matteo and Leiserson, Charles E and Prokop, Harald and Ramachandran, Sridhar},
booktitle = {40th Annual Symposium on Foundations of Computer Science (FOCS '99)},
doi = {10.1109/SFFCS.1999.814600},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Frigo et al. - 1999 - Cache-oblivious algorithms.pdf:pdf},
institution = {Massachusetts Institute of Technology},
isbn = {0769504094},
issn = {02725428},
pages = {285--297},
publisher = {IEEE Comput. Soc},
title = {{Cache-oblivious algorithms}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=814600},
year = {1999}
}
@article{Frigo2006,
abstract = {We present a technique for analyzing the number of cache misses incurred by multithreaded cache oblivious algorithms on an idealized parallel machine in which each processor has a private cache. We specialize this technique to computations executed by the Cilk work-stealing scheduler on a machine with dag-consistent shared memory. We show that a multithreaded cache oblivious matrix multiplication incurs O n 3 /\&8730; Z + Pn 1/3 n 2 cache misses when executed by the Cilk scheduler on a machine with P processors, each with a cache of size Z , with high probability. This bound is tighter than previously published bounds. We also present a new multithreaded cache oblivious algorithm for 1D stencil computations, which incurs O n 2 Z + n +\&8730; Pn 3 + \&949; cache misses with high probability.},
author = {Frigo, Matteo and Strumpen, Volker},
doi = {10.1007/s00224-007-9098-2},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Frigo, Strumpen - 2006 - The Cache Complexity of Multithreaded Cache Oblivious Algorithms.pdf:pdf},
issn = {1432-4350},
journal = {Theory of Computing Systems},
keywords = {printed},
mendeley-tags = {printed},
number = {2},
pages = {203--233},
publisher = {Springer},
title = {{The Cache Complexity of Multithreaded Cache Oblivious Algorithms}},
url = {http://www.springerlink.com/index/10.1007/s00224-007-9098-2},
volume = {45},
year = {2009}
}
@article{Furedi1981,
abstract = {LetA=(a ij be ann n matrix whose entries forij are independent random variables anda ji =a ij . Suppose that everya ij is bounded and for everyi>j we haveEa ij =$\mu$,D 2 a ij =$\sigma$2 andEa ii =v. E. P. Wigner determined the asymptotic behavior of the eigenvalues ofA (semi-circle law). In particular, for anyc>2$\sigma$ with probability 1-o(1) all eigenvalues except for at mosto(n) lie in the intervalI=(cn,cn). We show that with probability 1-o(1)all eigenvalues belong to the above intervalI if $\mu$=0, while in case $\mu$>0 only the largest eigenvalue $\lambda$1 is outsideI, and lambda 1 = fracSigma i,j aij n + fracsigma 2 mu + Oleft( fracIsqrt n right) i.e. $\lambda$1 asymptotically has a normal distribution with expectation (n1)$\mu$+v+($\sigma$2/$\mu$) and variance 2$\sigma$2 (bounded variance!).},
author = {F{\"u}redi, Zolt{\'a}n and Koml{\'o}s, J{\'a}nos},
doi = {10.1007/BF02579329},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/F\"{u}redi, Koml\'{o}s - 1981 - The eigenvalues of random symmetric matrices.pdf:pdf},
issn = {0209-9683},
journal = {Combinatorica},
number = {3},
pages = {233--241},
title = {{The eigenvalues of random symmetric matrices}},
url = {http://www.springerlink.com/index/10.1007/BF02579329},
volume = {1},
year = {1981}
}
@article{P.J.2010,
author = {Gayathri, P. J. and Punitha, S. C. and Punithavalli, M.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Gayathri, Punitha, Punithavalli - 2010 - Document Clustering using Linear Partitioning and Reallocation using EM Algorithm.pdf:pdf},
journal = {Global Journal of Computer Science and Technology},
number = {5},
pages = {88--93},
title = {{Document Clustering using Linear Partitioning and Reallocation using EM Algorithm}},
volume = {9},
year = {2010}
}
@article{VanDeGeijn1997,
author = {van de Geijn, Robert A. and Watts, Jerrell},
doi = {10.1002/(SICI)1096-9128(199704)9:4<255::AID-CPE250>3.0.CO;2-2},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Geijn, Watts - 1997 - SUMMA scalable universal matrix multiplication algorithm.pdf:pdf},
issn = {1040-3108},
journal = {Concurrency: Practice and Experience},
keywords = {printed},
mendeley-tags = {printed},
month = apr,
number = {4},
pages = {255--274},
title = {{SUMMA: scalable universal matrix multiplication algorithm}},
url = {http://doi.wiley.com/10.1002/(SICI)1096-9128(199704)9:4<255::AID-CPE250>3.0.CO;2-2},
volume = {9},
year = {1997}
}
@article{Gilbert1994,
abstract = {Many sparse matrix algorithms—for example, solving a sparse system of linear equations—begin by predicting the nonzero structure of the output of a matrix computation from the nonzero structure of its input. This paper is a catalog of ways to predict nonzero structure. It contains known results for some problems, including various matrix factorizations, and new results for other problems, including some eigenvector computations.},
author = {Gilbert, John R.},
doi = {10.1137/S0895479887139455},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Gilbert - 1994 - Predicting Structure in Sparse Matrix Computations.pdf:pdf},
issn = {0895-4798},
journal = {SIAM Journal on Matrix Analysis and Applications},
keywords = {eigenvectors,graph theory,matrix factorization,printed,sparse matrix algorithms,systems of linear equations},
mendeley-tags = {printed},
number = {1},
pages = {62--79},
title = {{Predicting Structure in Sparse Matrix Computations}},
url = {http://link.aip.org/link/SJMAEL/v15/i1/p62/s1\&Agg=doi},
volume = {15},
year = {1994}
}
@inproceedings{Gilbert2007,
abstract = {Large-scale computation on graphs and other discrete structures is becoming increasingly important in many applications, including computational biology, web search, and knowledge discovery. High-performance combinatorial computing is an infant field, in sharp contrast with numerical scientific computing. We argue that many of the tools of high-performance numerical computing – in particular, parallel algorithms and data structures for computation with sparse matrices – can form the nucleus of a robust infrastructure for parallel computing on graphs. We demonstrate this with an implementation of a graph analysis benchmark using the sparse matrix infrastructure in Star-P , our parallel dialect of the Matlab programming language.},
address = {Berlin, Heidelberg},
author = {Gilbert, John R. and Reinhardt, Steve and Shah, Viral},
booktitle = {PARA'06: Proceedings of the 8th international conference on Applied parallel computing: state of the art in scientific computing},
doi = {10.1007/978-3-540-75755-9},
editor = {K\aa gstr\"{o}m, Bo and Elmroth, Erik and Dongarra, Jack and Wa\'{s}niewski, Jerzy},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Gilbert et al. - 2007 - High-Performance Graph Algorithms from Parallel Sparse Matrices.pdf:pdf},
isbn = {978-3-540-75754-2},
keywords = {Computer Science,printed},
mendeley-tags = {printed},
pages = {260--269},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{High-Performance Graph Algorithms from Parallel Sparse Matrices}},
url = {http://www.springerlink.com/content/g022322v21501800/},
volume = {4699},
year = {2007}
}
@article{Gillespie2009,
abstract = {Background: Difficult problems in structural bioinformatics are often studied in simple exact models to gain insights and to derive general principles. Protein folding, for example, has long been studied in the lattice model. Recently, researchers have also begun to apply the lattice model to the study of RNA folding. Results: We present a novel method for predicting RNA secondary structures with pseudoknots: first simulate the folding dynamics of the RNA sequence on the 3D triangular lattice, next extract and select a set of disjoint base pairs from the best lattice conformation found by the folding simulation. Experiments on sequences from PseudoBase show that our prediction method outperforms the HotKnot algorithm of Ren, Rastegari, Condon and Hoos, a leading method for RNA pseudoknot prediction. Our method for RNA secondary structure prediction can be adapted into an efficient reconstruction method that, given an RNA sequence and an associated secondary structure, finds a conformation of the sequence on the 3D triangular lattice that realizes the base pairs in the secondary structure. We implemented a suite of computer programs for the simulation and visualization of RNA folding on the 3D triangular lattice. These programs come with detailed documentation and are accessible from the companion website of this paper at http://www.cs.usu.edu/\~{}mjiang/rna/DeltaIS/. Conclusion: Folding simulation on the 3D triangular lattice is effective method for RNA secondary structure prediction and lattice conformation reconstruction. The visualization software for the lattice conformations of RNA structures is a valuable tool for the study of RNA folding and is a great pedagogic device.},
author = {Gillespie, Joel and Mayne, Martin and Jiang, Minghui},
doi = {10.1186/1471-2105-10-369},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Gillespie, Mayne, Jiang - 2009 - RNA folding on the 3D triangular lattice.pdf:pdf},
institution = {Department of Computer Science, Utah State University, Logan, Utah 84322-4205, USA. jgillespie@cc.usu.edu},
journal = {BMC Bioinformatics},
number = {1},
pages = {369},
publisher = {BioMed Central},
title = {{RNA folding on the 3D triangular lattice}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/19891777},
volume = {10},
year = {2009}
}
@book{Golub1996,
abstract = {Revised and updated, the third edition of Golub and Van Loan's classic text in computer science provides essential information about the mathematical background and algorithmic skills required for the production of numerical software. This new edition includes thoroughly revised chapters on matrix multiplication problems and parallel matrix computations, expanded treatment of CS decomposition, an updated overview of floating point arithmetic, a more accurate rendition of the modified Gram-Schmidt process, and new material devoted to GMRES, QMR, and other methods designed to handle the sparse unsymmetric linear system problem.},
address = {Baltimore, MD},
author = {Golub, Gene H. and Van Loan, Charles F.},
edition = {Third},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Golub, Loan - 1996 - Matrix Computations.pdf:pdf},
isbn = {978-0801854149},
pages = {827},
publisher = {The Johns Hopkins University Press},
title = {{Matrix Computations}},
url = {http://www.amazon.com/Computations-Hopkins-Studies-Mathematical-Sciences/dp/0801854148},
year = {1996}
}
@article{Goni2008,
abstract = {Background: Recent developments have meant that network theory is making an important contribution to the topological study of biological networks, such as protein-protein interaction (PPI) networks. The identification of differentially expressed genes in DNA array experiments is a source of information regarding the molecular pathways involved in disease. Thus, considering PPI analysis and gene expression studies together may provide a better understanding of multifactorial neurodegenerative diseases such as Multiple Sclerosis (MS) and Alzheimer disease (AD). The aim of this study was to assess whether the parameters of degree and betweenness, two fundamental measures in network theory, are properties that differentiate between implicated (seed-proteins) and non-implicated nodes (neighbors) in MS and AD. We used experimentally validated PPI information to obtain the neighbors for each seed group and we studied these parameters in four networks: MS-blood network; MS-brain network; AD-blood network; and AD-brain network. Results: Specific features of seed-proteins were revealed, whereby they displayed a lower average degree in both diseases and tissues, and a higher betweenness in AD-brain and MS-blood networks. Additionally, the heterogeneity of the processes involved indicate that these findings are not pathway specific but rather that they are spread over different pathways. Conclusion: Our findings show differential centrality properties of proteins whose gene expression is impaired in neurodegenerative diseases.},
author = {Go\~{n}i, Joaqu\'{\i}n and Esteban, Francisco J and {De Mendiz\'{a}bal}, Nieves V\'{e}lez and Sepulcre, Jorge and Ardanza-Trevijano, Sergio and Agirrezabal, Ion and Villoslada, Pablo},
doi = {10.1186/1752-0509-2-52},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Go\~{n}i et al. - 2008 - A computational analysis of protein-protein interaction networks in neurodegenerative diseases.pdf:pdf},
institution = {Neuroimmunology laboratory, Department of Neuroscience, Center for Applied Medical Research, University of Navarra, Spain. jgoni@unav.es},
journal = {BMC Systems Biology},
keywords = {not printed},
mendeley-tags = {not printed},
number = {1},
pages = {52},
publisher = {BioMed Central},
title = {{A computational analysis of protein-protein interaction networks in neurodegenerative diseases}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/18570646},
volume = {2},
year = {2008}
}
@article{Goto2008,
abstract = {A simple but highly effective approach for transforming high-performance implementations on cache-based architectures of matrix-matrix multiplication into implementations of other commonly used matrix-matrix computations (the level-3 BLAS) is presented. Exceptional performance is demonstrated on various architectures.},
author = {Goto, Kazushige and van de Geijn, Robert A.},
doi = {10.1145/1377603.1377607},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Goto, Van De Geijn - 2008 - High-performance implementation of the level-3 BLAS.pdf:pdf},
institution = {The University of Texas at Austin, Department of Computer Sciences},
issn = {0098-3500},
journal = {ACM Transactions on Mathematical Software},
keywords = {printed},
mendeley-tags = {printed},
number = {1},
pages = {1--14},
publisher = {ACM},
title = {{High-performance implementation of the level-3 BLAS}},
url = {http://portal.acm.org/citation.cfm?doid=1377603.1377607},
volume = {35},
year = {2008}
}
@inproceedings{Goyal2009,
  address   = {Singapore},
  author    = {Goyal, Navneet and Goyal, Poonam and Venkatramaiah, K. and {Deepak P C} and {Sanoop P S}},
  booktitle = {IPCSIT},
  pages     = {323--333},
  title     = {{Incremental Clustering for Mining in a Data Warehousing Environment}},
  year      = {2009},
}
@inproceedings{Greiner2010,
abstract = {We consider the multiplication of a sparse N N matrix A with a dense N N matrix B in the I/O model. We determine the worst-case non-uniform complexity of this task up to a constant factor for all meaningful choices of the parameters N (dimension of the matrices), k (average number of non-zero entries per column or row in A, i.e., there are in total kN non-zero entries), M (main memory size), and B (block size), as long as M B2 (tall cache assumption). For large and small k, the structure of the algorithm does not need to depend on the structure of the sparse matrix A, whereas for intermediate densities it is possible and necessary to find submatrices that fit in memory and are slightly denser than on average. The focus of this work is asymptotic worst-case complexity, i.e., the existence of matrices that require a certain number of I/Os and the existence of algorithms (sometimes depending on the shape of the sparse matrix) that use only a constant factor more I/Os.},
address = {Berlin, Heidelberg},
author = {Greiner, Gero and Jacob, Riko},
booktitle = {LATIN 2010: Theoretical Informatics},
doi = {10.1007/978-3-642-12200-2},
editor = {L\'{o}pez-Ortiz, Alejandro},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Greiner, Jacob - 2010 - The IO Complexity of Sparse Matrix Dense Matrix Multiplication.pdf:pdf},
isbn = {978-3-642-12199-9},
pages = {143--156},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{The I/O Complexity of Sparse Matrix Dense Matrix Multiplication}},
url = {http://www.springerlink.com/index/10.1007/978-3-642-12200-2},
volume = {6034},
year = {2010}
}
@incollection{Gunnels2007,
abstract = {We present a study of implementations of DGEMM using both the cache-oblivious and cache-conscious programming styles. The cache-oblivious programs use recursion and automatically block DGEMM operands A , B , C for the memory hierarchy. The cache-conscious programs use iteration and explicitly block A , B , C for register files, all caches and memory. Our study shows that the cache-oblivious programs achieve substantially less performance than the cache-conscious programs. We discuss why this is so and suggest approaches for improving the performance of cache-oblivious programs.},
address = {Berlin, Heidelberg},
author = {Gunnels, John A. and Gustavson, Fred G. and Pingali, Keshav and Yotov, Kamen},
booktitle = {Applied Parallel Computing. State of the Art in Scientific Computing},
doi = {10.1007/978-3-540-75755-9_109},
editor = {K\aa gstr\"{o}m, Bo and Elmroth, Erik and Dongarra, Jack and Wa\'{s}niewski, Jerzy},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Gunnels et al. - 2007 - Is Cache-Oblivious DGEMM Viable.pdf:pdf},
isbn = {978-3-540-75754-2},
keywords = {Computer Science,printed},
mendeley-tags = {printed},
pages = {919--928},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Is Cache-Oblivious DGEMM Viable?}},
url = {http://www.springerlink.com/content/l63qh28765317366/},
volume = {4699},
year = {2007}
}
@inproceedings{Gupta1993,
abstract = {A number of parallel formulations of dense matrix multiplication algorithms have been developed. For arbitrarily large number of processors, any of there algorithms or their veriants can provide near linear speedup for sufficiently large matrix sizes and none of the algoroithms can be clearly claimed to be superior than the others. In this paper we analyze the performace and scalability of a number of parallel formulations of the matrix multiplication algorithm and predict the conditions under which each formulation is better than the others. We present a parallel formulation for hypercube and related architectures that performs better than any ot the schemes discribed in the literature so far for a wide range of matrix sizes and number of processors. The superior performance and the analytical scalability expressions for this algorithm are verified through experiments on the Thinking Machines Corporations\&apos;s CM-5 parallel computer for up to 512 processors. We show that special hardware permitting simultaneous communication on all the ports of the processors does not improve the overall scalability of the matrix multiplication algorithms on hypercub. We also dicuss the dependence of scalability on technology dependent factors such as communication and computation speeds and show that under certain conditions, it may be better to have a parallel computer with k-fold as many processors rather than one with the same number of processors, each k-fold as fast.},
author = {Gupta, Anshul and Kumar, Vipin},
booktitle = {1993 International Conference on Parallel Processing ICPP93 Vol3},
doi = {10.1109/ICPP.1993.160},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Gupta, Kumar - 1993 - Scalability of Parallel Algorithms for Matrix Multiplication.pdf:pdf},
isbn = {0849389836},
keywords = {printed},
mendeley-tags = {printed},
pages = {115--123},
publisher = {IEEE},
title = {{Scalability of Parallel Algorithms for Matrix Multiplication}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4134256},
year = {1993}
}
@article{Gupta1996,
author = {Gupta, S. K. S. and Huang, Hsuan-Cheng and Sadayappan, P. and Johnson, R. W.},
doi = {10.1006/jpdc.1996.0051},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Gupta et al. - 1996 - A Framework for Generating Distributed-Memory Parallel Programs for Block Recursive Algorithms.pdf:pdf},
journal = {Journal of Parallel and Distributed Computing},
keywords = {printed},
mendeley-tags = {printed},
pages = {137--153},
title = {{A Framework for Generating Distributed-Memory Parallel Programs for Block Recursive Algorithms}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.3334},
volume = {34},
year = {1996}
}
@article{Gustafsson2009,
abstract = {Recently, important insights into static network topology for biological systems have been obtained, but still global dynamical network properties determining stability and system responsiveness have not been accessible for analysis. Herein, we explore a genome-wide gene-to-gene regulatory network based on expression data from the cell cycle in Saccharomyces cerevisae (budding yeast). We recover static properties like hubs (genes having several out-going connections), network motifs and modules, which have previously been derived from multiple data sources such as whole-genome expression measurements, literature mining, protein-protein and transcription factor binding data. Further, our analysis uncovers some novel dynamical design principles; hubs are both repressed and repressors, and the intra-modular dynamics are either strongly activating or repressing whereas inter-modular couplings are weak. Finally, taking advantage of the inferred strength and direction of all interactions, we perform a global dynamical systems analysis of the network. Our inferred dynamics of hubs, motifs and modules produce a more stable network than what is expected given randomised versions. The main contribution of the repressed hubs is to increase system stability, while higher order dynamic effects (e.g. module dynamics) mainly increase system flexibility. Altogether, the presence of hubs, motifs and modules induce few flexible modes, to which the network is extra sensitive to an external signal. We believe that our approach, and the inferred biological mode of strong flexibility and stability, will also apply to other cellular networks and adaptive systems.},
author = {Gustafsson, Mika and H{\"o}rnquist, Michael and Bj{\"o}rkegren, Johan and Tegn{\'e}r, Jesper},
doi = {10.1049/iet-syb.2008.0112},
issn = {1751-8849},
journal = {IET systems biology},
keywords = {Adaptation,Biological,Cell Cycle,Cell Cycle Proteins,Cell Cycle Proteins: metabolism,Cell Cycle: physiology,Computer Simulation,Models,Physiological,Physiological: physiology,Proteome,Proteome: metabolism,Saccharomyces cerevisiae,Saccharomyces cerevisiae Proteins,Saccharomyces cerevisiae Proteins: metabolism,Saccharomyces cerevisiae: cytology,Saccharomyces cerevisiae: metabolism,Signal Transduction,Signal Transduction: physiology},
month = jul,
number = {4},
pages = {219--228},
pmid = {19640161},
title = {{Genome-wide system analysis reveals stable yet flexible network dynamics in yeast}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/19640161},
volume = {3},
year = {2009}
}
@inproceedings{Hall-Holt2006,
  address   = {New York, New York, USA},
  author    = {Hall-Holt, Olaf and Katz, Matthew J. and Kumar, Piyush and Mitchell, Joseph S. B. and Sityon, Arik},
  booktitle = {Proceedings of the 17th annual ACM-SIAM symposium on Discrete algorithm - SODA '06},
  doi       = {10.1145/1109557.1109610},
  isbn      = {0898716055},
  pages     = {474--483},
  publisher = {ACM Press},
  title     = {{Finding large sticks and potatoes in polygons}},
  url       = {http://portal.acm.org/citation.cfm?doid=1109557.1109610},
  year      = {2006},
}
@book{Han2006,
abstract = {Our ability to generate and collect data has been increasing rapidly. Not only are all of our business, scientific, and government transactions now computerized, but the widespread use of digital cameras, publication tools, and bar codes also generate data. On the collection side, scanned text and image platforms, satellite remote sensing systems, and the World Wide Web have flooded us with a tremendous amount of data. This explosive growth has generated an even more urgent need for new techniques and automated tools that can help us transform this data into useful information and knowledge. Like the first edition, voted the most popular data mining book by KD Nuggets readers, this book explores concepts and techniques for the discovery of patterns hidden in large data sets, focusing on issues relating to their feasibility, usefulness, effectiveness, and scalability. However, since the publication of the first edition, great progress has been made in the development of new data mining methods, systems, and applications. This new edition substantially enhances the first edition, and new chapters have been added to address recent developments on mining complex types of data- including stream data, sequence data, graph structured data, social network data, and multi-relational data. Whether you are a seasoned professional or a new student of data mining, this book has much to offer you: A comprehensive, practical look at the concepts and techniques you need to know to get the most out of real business data. Updates that incorporate input from readers, changes in the field, and more material on statistics and machine learning. Dozens of algorithms and implementation examples, all in easily understood pseudo-code and suitable for use in real-world, large-scale data mining projects. Complete classroom support for instructors at www.mkp.com/datamining2e companion site.},
author = {Han, Jiawei and Kamber, Micheline},
edition = {Second},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Han, Kamber - 2006 - Data Mining Concepts and Techniques.pdf:pdf},
isbn = {1558609016},
pages = {770},
publisher = {Morgan Kaufmann},
series = {The Morgan Kaufmann series in data management systems},
title = {{Data Mining: Concepts and Techniques}},
url = {http://www.amazon.com/Data-Mining-Concepts-Techniques-Management/dp/1558609016},
year = {2006}
}
@misc{Hanif2004,
abstract = {A protein is identified by a finite sequence of amino acids, each of them chosen from a set of 20 elements. The Protein Structure Prediction Problem, fundamental for biological and pharmaceutical research, is the problem of predicting the 3D native conformation of a protein, when its sequence of amino acids is known. All current mathematical models of the problem are affected by intrinsic computational limits, and by a disagreement on which is the most reliable energy function to be used. In this paper we present an agent-based framework for ab-initio simulations, composed by different levels of agents. Each amino acid of an input protein is viewed as an independent agent that communicates with the others. These agents are coordinated by strategic and cooperative higher level agents. The framework allows a modular representation of the problem and it is easily extensible for further refinements and for different energy functions. Simulations at this level of abstraction allow fast calculation, distributed on each agent. We provide an implementation using the Linda package of SICStus Prolog, to show the feasibility and the power of the method.},
author = {{Bayat Movahed}, Hanif},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Hanif, Movahed - 2004 - Simulation of Protein Folding.pdf:pdf},
pages = {1--14},
title = {{Simulation of Protein Folding}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.113.2609},
year = {2004}
}
@inproceedings{Hartley2006,
author = {Hartley, Timothy D. R. and {\c{C}}ataly{\"u}rek, {\"U}mit V. and {\"O}zg{\"u}ner, F{\"u}sun and Yoo, Andy and Kohn, Scott and Henderson, Keith},
booktitle = {2006 IEEE International Conference on Cluster Computing},
doi = {10.1109/CLUSTR.2006.311857},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Hartley et al. - 2006 - MSSG A Framework for Massive-Scale Semantic Graphs.pdf:pdf},
isbn = {1-4244-0327-8},
pages = {1--10},
publisher = {IEEE},
title = {{MSSG: A Framework for Massive-Scale Semantic Graphs}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4100363},
year = {2006}
}
@inproceedings{He2006,
abstract = {We propose to adapt the newly emerged cache-oblivious model to relational query processing. Our goal is to automatically achieve an overall performance comparable to that of ﬁne-tuned algorithms on a multi-level memory hierarchy. This automaticity is because cache-oblivious algorithms assume no knowledge about any spe- ciﬁc parameter values, such as the capacity and block size of each level of the hierarchy. As a ﬁrst step, we propose recursive par- titioning to implement cache-oblivious nested-loop joins (NLJs) without indexes, and recursive clustering and buffering to imple- ment cache-oblivious NLJs with indexes. Our theoretical results and empirical evaluation on three different architectures show that our cache-oblivious NLJs match the performance of their manually optimized, cache-conscious counterparts.},
author = {He, Bingsheng and Luo, Qiong},
booktitle = {Proceedings of the 15th ACM international conference on Information and knowledge management CIKM 06},
doi = {10.1145/1183614.1183717},
isbn = {1595934332},
keywords = {buffering,cache oblivious,cursive clustering,nested loop join,recursive partitioning},
pages = {718--727},
publisher = {ACM Press},
title = {{Cache-oblivious nested-loop joins}},
url = {http://portal.acm.org/citation.cfm?doid=1183614.1183717},
year = {2006}
}
@inproceedings{Heinecke2008,
abstract = {We present a parallel implementation of a cache oblivious algorithm for matrix multiplication on multicore platforms. The algorithm is based on a storage scheme and a block-recursive approach for multiplication, which are both based on a Peano space-filling curve. The recursion is stopped on matrix blocks with a size that needs to perfectly match the size of the L1 cache of the underlying CPU. The respective block multiplications are implemented by multiplication kernels that are hand-optimised for the SIMD units of current x86 CPUs. The Peano storage scheme is used to partition the block multiplications to different cores. Performance tests on various multicore platforms with up to 16 cores and different memory architecture show that the resulting implementation leads to better parallel scalability than achieved by Intel's MKL or GotoBLAS, and can outperform both libraries in terms of absolute performance on eight or more cores.},
address = {New York, New York, USA},
author = {Heinecke, Alexander and Bader, Michael},
booktitle = {Proceedings of the 2008 workshop on Memory access on future processors a solved problem? - MAW '08},
doi = {10.1145/1366219.1366223},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Heinecke, Brader - 2008 - Parallel matrix multiplication based on space-filling curves on shared memory multicore platforms.pdf:pdf},
isbn = {9781605580913},
keywords = {cache oblivious algorithms,curves,matrix multiplication,multicore,parallelisation,space filling,space-filling curves},
month = may,
pages = {385--392},
publisher = {ACM Press},
title = {{Parallel matrix multiplication based on space-filling curves on shared memory multicore platforms}},
url = {http://portal.acm.org/citation.cfm?doid=1366219.1366223 http://dl.acm.org/citation.cfm?id=1366219.1366223},
year = {2008}
}
@inproceedings{Heinecke2010,
  address   = {New York, New York, USA},
  author    = {Heinecke, Alexander and Trinitis, Carsten and Weidendorfer, Josef},
  booktitle = {Proceedings of the 7th ACM international conference on Computing frontiers - CF '10},
  doi       = {10.1145/1787275.1787298},
  file      = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Heinecke, Trinitis, Weidendorfer - 2010 - Porting existing cache-oblivious linear algebra HPC modules to larrabee architecture.pdf:pdf},
  isbn      = {9781450300445},
  keywords  = {accelerator space-filling curve,cache-oblivious,lu decomposition,manycore,matrix multiplication,openmp},
  month     = may,
  pages     = {91},
  publisher = {ACM Press},
  title     = {{Porting existing cache-oblivious linear algebra HPC modules to larrabee architecture}},
  url       = {http://dl.acm.org/citation.cfm?id=1787275.1787298},
  year      = {2010},
}
@inproceedings{Hekimian-Williams2010,
author = {Hekimian-Williams, Cory and Grant, Brandon and Kumar, Piyush},
booktitle = {2010 IEEE International Conference on RFID (IEEE RFID 2010)},
doi = {10.1109/RFID.2010.5467268},
isbn = {9781424457427},
pages = {89--96},
publisher = {IEEE},
title = {{Accurate localization of RFID tags using phase difference}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5467268},
year = {2010}
}
@book{Higgins2000,
abstract = {This volume covers practical important topics in the analysis of protein sequences and structures. It includes comparing amino acid sequences to structures comparing structures to each other, searching information on entire protein families as well as searching with single sequences, how to use the Internet and how to set up and use the SRS molecular biology database management system. Finally, there are chapters on multiple sequence alignment and protein secondary structure prediction. This book will be invaluable to occasional users of these techniques as well as experienced professionals or researchers.},
edition = {1},
editor = {Higgins, Des and Taylor, Willie},
isbn = {0199637903},
keywords = {Bioinformatics,Sequence,databanks,structure},
language = {English},
pages = {272},
publisher = {Oxford University Press},
title = {{Bioinformatics}},
year = {2000}
}
@article{Higham2007,
abstract = {We formulate a discrete optimization problem that leads to a simple and informative derivation of a widely used class of spectral clustering algorithms. Regarding the algorithms as attempting to bi-partition a weighted graph with N vertices, our derivation indicates that they are inherently tuned to tolerate all partitions into two non-empty sets, independently of the cardinality of the two sets. This approach also helps to explain the difference in behaviour observed between methods based on the unnormalized and normalized graph Laplacian. We also give a direct explanation of why Laplacian eigenvectors beyond the Fiedler vector may contain fine-detail information of relevance to clustering. We show numerical results on synthetic data to support the analysis. Further, we provide examples where normalized and unnormalized spectral clustering is applied to microarray data-here the graph summarizes similarity of gene activity across different tissue samples, and accurate clustering of samples is a key task in bioinformatics. (C) 2006 Elsevier B.V. All rights reserved.},
author = {Higham, Desmond J. and Kalna, Gabriela and Kibble, Milla},
doi = {10.1016/j.cam.2006.04.026},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Higham, Kalna, Kibble - 2007 - Spectral clustering and its use in bioinformatics.pdf:pdf},
issn = {03770427},
journal = {Journal of Computational and Applied Mathematics},
keywords = {algorithms,balancing threshold,classification,discovery,fiedler vector,gene expression,gene-expression,graph laplacian,identification,matrices,maximum likelihood,microarray,microarray data,partitioning,prediction,printed,random graph,rayleigh-ritz theorem,scaling},
mendeley-tags = {printed},
month = jul,
number = {1},
pages = {25--37},
title = {{Spectral clustering and its use in bioinformatics}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0377042706002366},
volume = {204},
year = {2007}
}
@article{Higham1990,
annote = {NOTE(review): removed the previously attached abstract; it described an unrelated primate social-cognition paper, not this ACM TOMS article. TODO: add the correct abstract.},
author = {Higham, Nicholas J},
doi = {10.1145/98267.98290},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Higham - 1990 - Exploiting fast matrix multiplication within the level 3 BLAS.pdf:pdf},
issn = {00983500},
journal = {ACM Transactions on Mathematical Software},
keywords = {printed},
mendeley-tags = {printed},
number = {4},
pages = {352--368},
title = {{Exploiting fast matrix multiplication within the level 3 BLAS}},
url = {http://portal.acm.org/citation.cfm?doid=98267.98290},
volume = {16},
year = {1990}
}
@article{Hills2010,
abstract = {A variety of coarse-grained (CG) models exists for simulation of proteins. An outstanding problem is the construction of a CG model with physically accurate conformational energetics rivaling all-atom force fields. In the present work, atomistic simulations of peptide folding and aggregation equilibria are force-matched using multiscale coarse-graining to develop and test a CG interaction potential of general utility for the simulation of proteins of arbitrary sequence. The reduced representation relies on multiple interaction sites to maintain the anisotropic packing and polarity of individual sidechains. CG energy landscapes computed from replica exchange simulations of the folding of Trpzip, Trp-cage and adenylate kinase resemble those of other reduced representations; non-native structures are observed with energies similar to those of the native state. The artifactual stabilization of misfolded states implies that non-native interactions play a deciding role in deviations from ideal funnel-like cooperative folding. The role of surface tension, backbone hydrogen bonding and the smooth pairwise CG landscape is discussed. Ab initio folding aside, the improved treatment of sidechain rotamers results in stability of the native state in constant temperature simulations of Trpzip, Trp-cage, and the open to closed conformational transition of adenylate kinase, illustrating the potential value of the CG force field for simulating protein complexes and transitions between well-defined structural states.},
author = {Hills, Ronald D and Lu, Lanyuan and Voth, Gregory A},
doi = {10.1371/journal.pcbi.1000827},
editor = {Nussinov, Ruth},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Hills, Lu, Voth - 2010 - Multiscale Coarse-Graining of the Protein Energy Landscape.pdf:pdf},
institution = {Department of Chemistry, James Franck Institute and Computation Institute, University of Chicago, Chicago, Illinois, United States of America.},
journal = {PLoS Computational Biology},
number = {6},
pages = {12},
publisher = {Public Library of Science},
title = {{Multiscale Coarse-Graining of the Protein Energy Landscape}},
url = {http://dx.plos.org/10.1371/journal.pcbi.1000827},
volume = {6},
year = {2010}
}
@article{Hotho1998,
author = {Hotho, Andreas and Maedche, Alexander and Staab, Steffen},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Hotho, Maedche, Staab - 1998 - Ontology-based Text Document Clustering.pdf:pdf},
isbn = {3540008438},
journal = {KI},
number = {4},
pages = {1--13},
publisher = {Citeseer},
title = {{Ontology-based Text Document Clustering}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.14.8083\&rep=rep1\&type=pdf},
volume = {16},
year = {1998}
}
@book{Huang2005,
  author    = {Huang, Kerson},
  title     = {{Lectures on Statistical Physics and Protein Folding}},
  publisher = {World Scientific},
  year      = {2005},
  abstract  = {This book introduces an approach to protein folding from the point of view of kinetic theory. There is an abundance of data on protein folding, but few proposals are available on the mechanism driving the process. Here, presented for the first time, are suggestions on possible research directions, as developed by the author in collaboration with C C Lin. The first half of this invaluable book contains a concise but relatively complete review of relevant topics in statistical mechanics and kinetic theory. It includes standard topics such as thermodynamics, the Maxwell-Boltzmann distribution, and ensemble theory. Special discussions include the dynamics of phase transitions, and Brownian motion as an illustration of stochastic processes. The second half develops topics in molecular biology and protein structure, with a view to discovering mechanisms underlying protein folding. Attention is focused on the energy flow through the protein in its folded state. A mathematical model, based on the Brownian motion of coupled harmonic oscillators, is worked out in the appendix.}
}
@article{Huang1996,
abstract = {LAN-connected workstations are a heterogeneous environment, where each workstation provides time-varying computing power, and thus dynamic load balancing mechanisms are necessary for parallel applications to run efficiently. Parallel basic linear algebra subprograms (BLAS) have recently shown promise as a means of taking advantage of parallel computing in solving scientific problems. Most existing parallel algorithms of BLAS are designed for conventional parallel computers; they do not take the particular characteristics of LAN-connected workstations into consideration. This paper presents a parallelizing method of Level 3 BLAS for LAN-connected workstations. The parallelizing method makes dynamic load balancing throughcolumn-blockingdata distribution. The experiment results indicate that this dynamic load balancing mechanism really leads to a more efficient parallel level 3 BLAS for LAN-connected workstations.},
author = {Huang, Kuo-Chan and Wang, Feng-Jian and Wu, Pei-Chi},
doi = {10.1006/jpdc.1996.0126},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Huang, Wang, Wu - 1996 - Parallelizing a Level 3 BLAS Library for LAN-Connected Workstations.pdf:pdf},
issn = {07437315},
journal = {Journal of Parallel and Distributed Computing},
keywords = {printed},
mendeley-tags = {printed},
month = oct,
number = {1},
pages = {28--36},
title = {{Parallelizing a Level 3 BLAS Library for LAN-Connected Workstations}},
url = {http://www.sciencedirect.com/science/article/B6WKJ-45MG458-12/2/d60d8d6eb07229124b64cf578fac92d9},
volume = {38},
year = {1996}
}
@inproceedings{Hunold2004a,
abstract = {Matrix-matrix multiplication is one of the core computations in many algorithms from scientific computing or numerical analysis and many efficient realizations have been invented over the years, including many parallel ones. The current trend to use clusters of PCs or SMPs for scientific computing suggests to revisit matrix-matrix multiplication and investigate efficiency and scalability of different versions on clusters. In this paper we present parallel algorithms for matrix-matrix multiplication which are built up from several algorithms in a multilevel structure. Each level is associated with a hierarchical partition of the set of available processors into disjoint subsets so that deeper levels of the algorithm employ smaller groups of processors in parallel. We perform runtime experiments on several parallel platforms and show that multilevel algorithms can lead to significant performance gains compared with state-of-the-art methods.},
author = {Hunold, Sascha and Rauber, Thomas and R\"{u}nger, Gudula},
booktitle = {Proceedings of the 18th Annual International Conference on Supercomputing (ICS '04)},
doi = {10.1145/1006209.1006230},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Hunold, Rauber, R\"{u}nger - 2004 - Multilevel hierarchical matrix multiplication on clusters.pdf:pdf},
isbn = {1581138393},
keywords = {matrix multiplication,multiprocessor,printed,strassen,task parallelism,tasks},
mendeley-tags = {printed},
pages = {136--145},
publisher = {ACM Press},
series = {ICS '04},
title = {{Multilevel hierarchical matrix multiplication on clusters}},
url = {http://portal.acm.org/citation.cfm?doid=1006209.1006230},
year = {2004}
}
@article{Hunold2008,
abstract = {This paper presents parallel algorithms for matrixmatrix multiplication which are built up from several algorithms in a multi-level structure. The upper level consists of Strassens algorithm which is performed for a predefined number of recursions. The number of recursions can be adapted to the specific execution platform. The intermediate level is performed by a parallel non-hierarchical algorithm and the lower level uses efficient one-processor implementations of matrixmatrix multiplication like BLAS or ATLAS. Both the number of recursions of Strassens algorithm and the specific algorithms of the intermediate and lower level can be chosen so that a variety of different multi-level algorithms results. Each level of the multi-level algorithms is associated with a hierarchical partition of the set of available processors into disjoint subsets so that deeper levels of the algorithm employ smaller groups of processors in parallel. The algorithms are expressed in the multiprocessor task programming model and are coded with the runtime library Tlib. Performance experiments on several parallel platforms show that the multi-level algorithms can lead to significant performance gains.},
author = {Hunold, Sascha and Rauber, Thomas and R\"{u}nger, Gudula},
doi = {10.1016/j.parco.2008.03.003},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Hunold, Rauber, R\"{u}nger - 2008 - Combining building blocks for parallel multi-level matrix multiplication.pdf:pdf},
issn = {01678191},
journal = {Parallel Computing},
keywords = {printed},
mendeley-tags = {printed},
number = {6--8},
pages = {411--426},
publisher = {Elsevier},
title = {{Combining building blocks for parallel multi-level matrix multiplication}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0167819108000355},
volume = {34},
year = {2008}
}
@article{Hunold2004,
abstract = {We consider the realization of matrix-matrix multiplication and propose a hierarchical algorithm implemented in a task-parallel way using multiprocessor tasks on distributed memory. The algorithm has been designed to minimize the communication overhead while showing large locality of memory references. The task-parallel realization makes the algorithm especially suited for cluster of SMPs since tasks can then be mapped to the different cluster nodes in order to efficiently exploit the cluster architecture. Experiments on current cluster machines show that the resulting execution times are competitive with state-of-the-art methods like PDGEMM .},
address = {Berlin, Heidelberg},
annote = {NOTE(review): the volume editors (Bubak, van Albada, Sloot, Dongarra) had been merged into the author field; they are kept only in the editor field now.},
author = {Hunold, Sascha and Rauber, Thomas and R\"{u}nger, Gudula},
doi = {10.1007/978-3-540-24687-9_1},
editor = {Bubak, Marian and Albada, Geert Dick and Sloot, Peter M. A. and Dongarra, Jack},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Hunold et al. - 2004 - Hierarchical Matrix-Matrix Multiplication Based on Multiprocessor Tasks.pdf:pdf},
isbn = {978-3-540-22115-9},
journal = {Lecture Notes in Computer Science},
keywords = {Computer Science,printed},
mendeley-tags = {printed},
pages = {1--8},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Hierarchical Matrix-Matrix Multiplication Based on Multiprocessor Tasks}},
url = {http://www.springerlink.com/content/ql29nquy5xh9h6la/ http://www.springerlink.com/index/10.1007/b97988},
volume = {3037},
year = {2004}
}
@inproceedings{Huss-Lederman1994,
abstract = {This paper compares two general library routines for performing parallel distributed matrix multiplication. The PUMMA algorithm utilizes block scattered data layout, whereas BiMMer utilizes virtual 2-D torus wrap. The algorithmic differences resulting from these different layouts are discussed as well as the general issues associated with different data layouts for library routines. Results on the Intel Delta for two matrix multiplication algorithms are presented.},
author = {Huss-Lederman, S and Jacobson, E M and Tsao, A},
booktitle = {Proceedings of the Scalable Parallel Libraries Conference, October 6--8, 1993, Mississippi State, Mississippi},
doi = {10.1109/SPLC.1993.365573},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Huss-Lederman, Jacobson, Tsao - 1994 - Comparison of Scalable Parallel Matrix Multiplication Libraries.pdf:pdf},
isbn = {0818649801},
keywords = {printed},
mendeley-tags = {printed},
pages = {142--149},
publisher = {IEEE Computer Society Press},
title = {{Comparison of Scalable Parallel Matrix Multiplication Libraries}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=365573},
year = {1994}
}
@article{Irony2004,
abstract = {We present lower bounds on the amount of communication that matrix multiplication algorithms must perform on a distributed-memory parallel computer. We denote the number of processors by P and the dimension of square matrices by n. We show that the most widely used class of algorithms, the so-called two-dimensional (2D) algorithms, are optimal, in the sense that in any algorithm that only uses O(n(2)/p) words of memory per processor, at least one processor must send or receive Omega(n(2)/p(1/2)) words. We also show that algorithms from another class, the so-called three-dimensional (3D) algorithms, are also optimal. These algorithms use replication to reduce communication. We show that in any algorithm that uses Omega(n(2)/p(2/3)) words of memory per processor, at least one processor must send or receive Omega(n(2)/p(2/3)) words. Furthermore, we show a continuous tradeoff between the size of local memories and the amount of communication that must be performed. The 2D and 3D bounds are essentially instantiations of this tradeoff. We also show that if the input is distributed across the local memories of multiple nodes without replication, then Omega(n(2)) words must cross any bisection cut of the machine. All our bounds apply only to conventional o(n(3)) algorithms. They do not apply to Strassen's algorithm or other Theta(n(3)) algorithms. (C) 2004 Elsevier Inc. All rights reserved.},
author = {Irony, Dror and Toledo, Sivan and Tiskin, Alexander},
doi = {10.1016/j.jpdc.2004.03.021},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Irony, Toledo, Tiskin - 2004 - Communication lower bounds for distributed-memory matrix multiplication.pdf:pdf},
journal = {Journal of Parallel and Distributed Computing},
keywords = {printed},
mendeley-tags = {printed},
number = {9},
pages = {1017--1026},
publisher = {Academic Press},
title = {{Communication lower bounds for distributed-memory matrix multiplication}},
volume = {64},
year = {2004}
}
@inproceedings{Ao2009,
annote = {NOTE(review): last author was malformed ("S., Islam M."); normalised to "Islam, M. S." -- verify against the published paper.},
author = {Islam, Nazrul and Islam, Shohidul and Kashem, M. A. and Islam, M. R. and Islam, M. S.},
booktitle = {Proceedings of the International MultiConference of Engineers and Computer Scientists 2009},
editor = {Ao, S. I. and Gelman, Len and Hukins, David WL and Hunter, Andrew and Korsunsky, A. M.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Islam et al. - 2009 - An Empirical Distributed Matrix Multiplication Algorithm to Reduce Time Complexity.pdf:pdf},
isbn = {9789881701275},
keywords = {Distributed Homogeneous Complexity,Matrix Multiplication,Sequential Algorithm,Time System,printed},
language = {English},
mendeley-tags = {printed},
pages = {2171--2173},
publisher = {Newswood Limited},
title = {{An Empirical Distributed Matrix Multiplication Algorithm to Reduce Time Complexity}},
year = {2009}
}
@article{Ismail2011,
abstract = {With the advent of multi-cores every processor has built-in parallel computational power and that can only be fully utilized only if the program in execution is written accordingly. This study is a part of an on-going research for designing of a new parallel Programming model for multicore architectures. In this paper we have presented a simple, highly efficient and scalable implementation of a common matrix multiplication algorithm using a newly developed parallel programming model SPC3 PM for general purpose multi-core processors. From our study it is found that matrix multiplication done concurrently on multi-cores using SPC3 PM requires much less execution time than that required using the present standard parallel programming environments like OpenMP. Our approach also shows scalability, better and uniform speedup and better utilization of available cores than that the algorithm written using standard OpenMP or similar parallel programming tools. We have tested our approach for up to 24 cores with different matrices size varying from 100 x 100 to 10000 x 10000 elements. And for all these tests our proposed approach has shown much improved performance and scalability},
author = {Ismail, Muhammad Ali and Mirza, S. H. and Altaf, Talat},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ismail, Mirza, Altaf - 2011 - Concurrent Matrix Multiplication on Multi-Core Processor.pdf:pdf},
journal = {International Journal of Computer Science and Security (IJCSS)},
keywords = {Concurrent Programming,Matrix Multiplication,Parallel Programming,multicore,printed},
mendeley-tags = {printed},
number = {2},
pages = {208--220},
title = {{Concurrent Matrix Multiplication on Multi-Core Processor}},
url = {http://cscjournals.org/csc/manuscript/Journals/IJCSS/volume5/Issue2/IJCSS-465.pdf},
volume = {5},
year = {2011}
}
@book{Istas2005,
author = {Istas, Jacques},
doi = {10.1007/3-540-27877-X},
isbn = {9783540253051},
pages = {490},
publisher = {Springer},
title = {{Mathematical Modeling for the Life Sciences}},
year = {2005}
}
@book{Jacobi2006,
abstract = {JavaServer Faces (JSF) technology is a Java user interface (UI) framework that simplifies the building of Java Web applications. Pro JSF shows developers how to leverage the full potential of JavaServer Faces. It is not an entry-level tutorial but a book about building effective JSF components for sophisticated, enterprise-level Rich Internet Applications, moving straight to what makes this such a powerful and flexible technology: the JSF component.

Whereas current books on the market don't stray far beyond basic HTML-rendering, this book's whole focus is on building custom JSF UI components that allow you to target your application at any client – IE, the Mozilla browser, or a PDA. It demonstrates best-practice development of common UI components (such as date field and menu) and goes on to provide practical, end-to-end JSF techniques for building versatile client-agnostic Java web applications, utilizing best-of-breed RIA rendering technologies such as XUL and HTC.

Written by JSF experts and verified by established community figures – including Adam Winer (member of JSF Expert Group) and Kito Mann (JSFCentral.com and JSF in Action) – this book provides reliable and groundbreaking JSF components for developers who are looking to fully exploit the power of JSF in their Java Web applications.},
author = {Jacobi, Jonas and Fallows, John R.},
edition = {1},
isbn = {978-1590595800},
pages = {464},
publisher = {Apress},
title = {{Pro JSF and Ajax: Building Rich Internet Components}},
year = {2006}
}
@inproceedings{Jacobsen2001,
abstract = {When search trees are made relaxed, balance constraints are weakened such that updates can be made without immediate rebalancing. This can lead to a speed-up in some circumstances. However, the weakened balance constraints also make it more challenging to prove complexity results for relaxed structures.In our opinion, one of the simplest and most intuitive presentations of balanced search trees has been given via layered trees. We show that relaxed layered trees are among the best of the relaxed structures. More precisely, rebalancing is worst-case logarithmic and amortized constant per update, and restructuring is worst-case constant per update.},
author = {Jacobsen, Lars and Larsen, Kim S.},
booktitle = {ICTCS '01 Proceedings of the 7th Italian Conference on Theoretical Computer Science},
isbn = {3-540-42672-8},
month = oct,
pages = {269--284},
publisher = {Springer-Verlag},
title = {{Complexity of Layered Binary Search Trees with Relaxed Balance}},
url = {http://dl.acm.org/citation.cfm?id=646293.687372},
year = {2001}
}
@article{Jain2010,
abstract = {Organizing data into sensible groupings is one of the most fundamental modes of understanding and learning. As an example, a common scheme of scientific classification puts organisms into a system of ranked taxa: domain, kingdom, phylum, class, etc. Cluster analysis is the formal study of methods and algorithms for grouping, or clustering, objects according to measured or perceived intrinsic characteristics or similarity. Cluster analysis does not use category labels that tag objects with prior identifiers, i.e., class labels. The absence of category information distinguishes data clustering (unsupervised learning) from classification or discriminant analysis (supervised learning). The aim of clustering is to find structure in data and is therefore exploratory in nature. Clustering has a long and rich history in a variety of scientific fields. One of the most popular and simple clustering algorithms, K-means, was first published in 1955. In spite of the fact that K-means was proposed over 50 years ago and thousands of clustering algorithms have been published since then, K-means is still widely used. This speaks to the difficulty in designing a general purpose clustering algorithm and the ill-posed problem of clustering. We provide a brief overview of clustering, summarize well known clustering methods, discuss the major challenges and key issues in designing clustering algorithms, and point out some of the emerging and useful research directions, including semi-supervised clustering, ensemble clustering, simultaneous feature selection during data clustering, and large scale data clustering.},
author = {Jain, Anil K.},
doi = {10.1016/j.patrec.2009.09.011},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Jain - 2010 - Data clustering 50 years beyond K-means☆.pdf:pdf},
issn = {01678655},
journal = {Pattern Recognition Letters},
keywords = {Data clustering,Historical developments,King-Sun Fu prize,Perspectives on clustering,User's dilemma},
month = jun,
number = {8},
pages = {651--666},
title = {{Data clustering: 50 years beyond K-means}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0167865509002323},
volume = {31},
year = {2010}
}
@article{Jain1999,
abstract = {Clustering is the unsupervised classification of patterns (observations, data items, or feature vectors) into groups (clusters). The clustering problem has been addressed in many contexts and by researchers in many disciplines; this reflects its broad appeal and usefulness as one of the steps in exploratory data analysis. However, clustering is a difficult problem combinatorially, and differences in assumptions and contexts in different communities has made the transfer of useful generic concepts and methodologies slow to occur. This paper presents an overview of pattern clustering methods from a statistical pattern recognition perspective, with a goal of providing useful advice and references to fundamental concepts accessible to the broad community of clustering practitioners. We present a taxonomy of clustering techniques, and identify cross-cutting themes and recent advances. We also describe some important applications of clustering algorithms such as image segmentation, object recognition, and information retrieval.},
author = {Jain, Anil K. and Murty, M N and Flynn, P J},
doi = {10.1145/331499.331504},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Jain, Murty, Flynn - 1999 - Data clustering a review.pdf:pdf},
issn = {03600300},
journal = {ACM Computing Surveys},
number = {3},
pages = {264--323},
publisher = {ACM},
title = {{Data clustering: a review}},
url = {http://portal.acm.org/citation.cfm?doid=331499.331504},
volume = {31},
year = {1999}
}
@inproceedings{Jambawalikar2008,
abstract = {We study the problem of computing the minimum volume enclosing ellipsoid (MVEE) containing a given set of ellipsoids S = \{E1, E2, \ldots, En\} in R^d. We show how to efficiently compute a small subset X of S of size at most alpha = |X| = O(d^2/eps) whose minimum volume ellipsoid is a (1 + eps)-approximation to the minimum volume ellipsoid of S. We use an augmented real number model of computation to achieve a running time of O(alpha(n d^omega + d^3)) where omega < 2.376 is the exponent of square matrix multiplication. This is the best known complexity for solving the MVEE problem when n is much larger than d and eps is large.},
annote = {NOTE(review): removed the second "author" ("Brook, Stony" -- an affiliation parsed as a name) and restored co-author Piyush Kumar; abstract de-garbled from HTML-entity mojibake. Verify both against the published version.},
author = {Jambawalikar, Sachin and Kumar, Piyush},
booktitle = {2008 International Conference on Computational Sciences and Its Applications},
doi = {10.1109/ICCSA.2008.24},
isbn = {9780769532431},
keywords = {Computational Geometry,Convex Optimization,Minimum Volume Enclosing Ellipsoids,not printed},
mendeley-tags = {not printed},
pages = {478--487},
publisher = {IEEE},
title = {{A note on Approximate Minimum Volume Enclosing Ellipsoid of Ellipsoids}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4561253},
year = {2008}
}
@article{Jelfimova2007,
author = {Jelfimova, L D},
doi = {10.1007/s10559-010-9233-y},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Jelfimova - 2010 - Fast hybrid matrix multiplication algorithms.pdf:pdf},
issn = {1060-0396},
journal = {Cybernetics and Systems Analysis},
keywords = {printed},
mendeley-tags = {printed},
month = aug,
number = {4},
pages = {563--573},
publisher = {Springer},
title = {{Fast hybrid matrix multiplication algorithms}},
url = {http://www.springerlink.com/index/10.1007/s10559-010-9233-y},
volume = {46},
year = {2010}
}
@article{Jiang2000,
abstract = {In this paper, a practical method is presented that allows for the compact representation of sparse matrices. We have employed some random hash functions and applied the rehash technique to the compression of sparse matrices. Using our method, a large-scale sparse matrix can be compressed into some condensed tables. The zero elements of the original matrix can be determined directly by these condensed tables, and the values of nonzero elements can be recovered in a row major order. Moreover, the space occupied by these condensed tables is small. Though the elements cannot be referenced directly, the compression result can be transmitted progressively. Performance evaluation shows that our method has achieved quite some effective improvement for the compression of randomly distributed sparse matrices.},
annote = {Deals with compression of sparse matrices optimal for transmission.},
author = {Jiang, Ji-Han and Chang, Chin-Chen and Chen, Tung-Shou},
doi = {10.1016/S0169-023X(99)00017-8},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Jiang, Chang, Chen - 2000 - A compact sparse matrix representation using random hash functions.pdf:pdf},
issn = {0169023X},
journal = {Data \& Knowledge Engineering},
keywords = {data compression,data filtering,hash function,matrix compression,printed,progressive transmission,rehash,sparse matrix},
mendeley-tags = {printed},
month = jan,
number = {1},
pages = {29--49},
title = {{A compact sparse matrix representation using random hash functions}},
volume = {32},
year = {2000}
}
@book{Jones2006,
address = {Cambridge, MA, U.S.A.},
author = {Jones, Neil C and Pevzner, Pavel A},
booktitle = {Leonardo},
isbn = {0-262-10106-8},
issn = {0024094X},
number = {5},
pages = {434},
publisher = {MIT Press},
title = {{An Introduction to Bioinformatics Algorithms}},
url = {http://books.google.com/books?hl=en\&lr=\&id=p\_qzpkNVcUwC\&oi=fnd\&pg=PR15\&dq=An+Introduction+to+Bioinformatics+Algorithms\&ots=qtMcW7E61z\&sig=6HZa9eBrmxsA4KA4O-e2I12gra8},
volume = {39},
year = {2006}
}
@inproceedings{Jung2008,
abstract = {Proteins combine with other materials to achieve a variety of functions, which will be similar if their active sites are similar. Thus we can infer a protein's function by identifying its binding area. This paper proposes a novel method to select a protein's binding area using the Markov Cluster (MCL) algorithm. A distance matrix is constructed from the surface residues distance on the protein, then transformed to the connectivity matrix for application of the MCL process, and finally evaluated by using Catalytic Site Atlas (CSA) data. In the experimental result using CSA data which comprised 94 selected single chain proteins, our algorithm detects 91 (97\%) binding areas near the active site of each protein. We introduced new geometrical features with the aim of improving the prediction accuracy of the active site residues by selecting the residues near the active site.},
author = {Jung, Kwang Su and Jin, Yu Ki and Chung, Yong Je and Ryu, Keun Ho},
booktitle = {2008 8th IEEE International Conference on Computer and Information Technology},
doi = {10.1109/CIT.2008.4594731},
isbn = {978-1-4244-2357-6},
keywords = {printed},
mendeley-tags = {printed},
month = jul,
pages = {532--537},
publisher = {IEEE},
title = {{Investigating the binding area of protein surface using MCL algorithm}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4594731},
year = {2008}
}
@book{Kalinovsky2004,
abstract = {As a Java developer, you may find yourself in a situation where you have to maintain someone else's code or use a third-party's library for your own application without documentation of the original source code. Rather than spend hours feeling like you want to bang your head against the wall, turn to Covert Java: Techniques for Decompiling, Patching, and Reverse Engineering. These techniques will show you how to better understand and work with third-party applications. Each chapter focuses on a technique to solve a specific problem, such as obfuscation in code or scalability vulnerabilities, outlining the issue and demonstrating possible solutions. Summaries at the end of each chapter will help you double check that you understood the crucial points of each lesson. You will also be able to download all code examples and sample applications for future reference from the publisher's website. Let Covert Java help you crack open mysterious codes!},
author = {Kalinovsky, Alex},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kalinovsky - 2004 - Covert Java Techniques for Decompiling, Patching, and Reverse Engineering.pdf:pdf},
isbn = {978-0672326387},
language = {English},
pages = {288},
publisher = {Sams},
title = {{Covert Java: Techniques for Decompiling, Patching, and Reverse Engineering}},
year = {2004}
}
@inproceedings{Xiao2004,
abstract = {Poster Session 5: Statistical Methods},
author = {Kanade, T.},
booktitle = {Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.},
doi = {10.1109/CVPR.2004.1315218},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kanade - 2004 - Robust subspace clustering by combined use of kNND metric and SVD algorithm.pdf:pdf},
isbn = {0-7695-2158-4},
number = {1},
pages = {592--599},
publisher = {IEEE},
title = {{Robust subspace clustering by combined use of kNND metric and SVD algorithm}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1315218},
volume = {2},
year = {2004}
}
@inproceedings{Kang2009,
abstract = {In this paper, we describe PEGASUS, an open source peta graph mining library which performs typical graph mining tasks such as computing the diameter of the graph, computing the radius of each node and finding the connected components. as the size of graphs reaches several giga-, tera- or peta-bytes, the necessity for such a library grows too. To the best of our knowledge, PEGASUS is the first such library, implemented on the top of the HADOOP platform, the open source version of MAPREDUCE. Many graph mining operations (PageRank, spectral clustering, diameter estimation, connected components etc.) are essentially a repeated matrix-vector multiplication. In this paper we describe a very important primitive for PEGASUS, called GIM-V (generalized iterated matrix-vector multiplication). GIM-V is highly optimized, achieving (a) good scale-up on the number of available machines (b) linear running time on the number of edges, and (c) more than 5 times faster performance over the non-optimized version of GIM-V. Our experiments ran on M45, one of the top 50 supercomputers in the world. We report our findings on several real graphs, including one of the largest publicly available Web graphs, thanks to Yahoo!, with 6.7 billion edges.},
author = {Kang, U and Tsourakakis, Charalampos E. and Faloutsos, Christos},
booktitle = {2009 9th IEEE International Conference on Data Mining},
doi = {10.1109/ICDM.2009.14},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kang, Tsourakakis, Faloutsos - 2009 - PEGASUS A Peta-Scale Graph Mining System Implementation and Observations.pdf:pdf},
isbn = {978-1-4244-5242-2},
issn = {15504786},
keywords = {diame,gim v,graph mining,hadoop,operations,optimizations,pagerank,pegasus,printed,random walk with restart,several graph mining},
mendeley-tags = {printed},
month = dec,
organization = {Carnegie Mellon University},
pages = {229--238},
publisher = {IEEE},
title = {{PEGASUS: A Peta-Scale Graph Mining System Implementation and Observations}},
url = {http://ieeexplore.ieee.org/xpls/abs\_all.jsp?arnumber=5360248 http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5360248},
volume = {31132},
year = {2009}
}
@article{Kang2010,
abstract = {In this paper, we describe PeGaSus, an open source Peta GraphMining library which performs typical graph mining tasks such as computing the diameter of the graph, computing the radius of each node, finding the connected components, and computing the importance score of nodes. As the size of graphs reaches several Giga-, Tera- or Peta-bytes, the necessity for such a library grows too. To the best of our knowledge, PeGaSus is the first such library, implemented on the top of the Hadoop platform, the open source version of MapReduce. Many graph mining operations (PageRank, spectral clustering, diameter estimation, connected components, etc.) are essentially a repeated matrix-vector multipli- cation. In this paper, we describe a very important primitive for PeGaSus, called GIM-V (generalized iterated matrix-vector multiplication). GIM-V is highly optimized, achieving (a) good scale-up on the number of availablemachines, (b) linear running time on the number of edges, and (c) more than 5 times faster performance over the non-optimized version of GIM-V. Our experiments ran on M45, one of the top 50 supercomputers in the world. We report our findings on several real graphs, including one of the largest publicly availableWeb graphs, thanks to Yahoo!, with 6.7 billion edges.},
author = {Kang, U and Tsourakakis, Charalampos E. and Faloutsos, Christos},
doi = {10.1007/s10115-010-0305-0},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kang, Tsourakakis, Faloutsos - 2010 - PEGASUS mining peta-scale graphs.pdf:pdf},
issn = {02191377},
journal = {Knowledge and Information Systems},
keywords = {generalized iterative matrix vector,gim v,graph mining,hadoop,multiplication,pegasus,printed},
mendeley-tags = {printed},
number = {2},
pages = {303--325},
publisher = {Springer},
title = {{PEGASUS: mining peta-scale graphs}},
url = {http://www.springerlink.com/index/10.1007/s10115-010-0305-0},
volume = {27},
year = {2010}
}
@inproceedings{Kannan,
abstract = {We motivate and develop a natural bicriteria measure for assessing the quality of a clustering that avoids the drawbacks of existing measures. A simple recursive heuristic is shown to have poly-logarithmic worst-case guarantees under the new measure. The main result of the article is the analysis of a popular spectral algorithm. One variant of spectral clustering turns out to have effective worst-case guarantees; another finds a "good" clustering, if one exists.},
author = {Kannan, Ravi and Vempala, Santosh and Vetta, Adrian},
booktitle = {Proceedings 41st Annual Symposium on Foundations of Computer Science},
doi = {10.1109/SFCS.2000.892125},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kannan, Vempala, Veta - 2000 - On clusterings-good, bad and spectral.pdf:pdf},
isbn = {0-7695-0850-2},
keywords = {algorithms,approximation algorithms,clustering,graph algorithms,printed,spectral methods,theory},
mendeley-tags = {printed},
pages = {367--377},
publisher = {IEEE Comput. Soc},
title = {{On clusterings-good, bad and spectral}},
url = {http://apps.webofknowledge.com/full\_record.do?product=WOS\&search\_mode=Refine\&qid=6\&SID=V1dGdjIgFanpbLh1Kbe\&page=2\&doc=20 http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=892125},
year = {2000}
}
@inproceedings{Karloff2010,
abstract = {In recent years the MapReduce framework has emerged as one of the most widely used parallel computing platforms for processing data on terabyte and petabyte scales. Used daily at companies such as Yahoo!, Google, Amazon, and Facebook, and adopted more recently by several universities, it allows for easy parallelization of data intensive computations over many machines. One key feature of MapReduce that differentiates it from previous models of parallel computation is that it interleaves sequential and parallel computation. We propose a model of efficient computation using the MapReduce paradigm. Since MapReduce is designed for computations over massive data sets, our model limits the number of machines and the memory per machine to be substantially sublinear in the size of the input. On the other hand, we place very loose restrictions on the computational power of of any individual machine— our model allows each machine to perform sequential computations in time polynomial in the size of the original input. We compare MapReduce to the PRAM model of computation. We prove a simulation lemma showing that a large class of PRAM algorithms can be efficiently simulated via MapReduce. The strength of MapReduce, however, lies in the fact that it uses both sequential and parallel computation. We demonstrate how algorithms can take advantage of this fact to compute an MST of a dense graph in only two rounds, as opposed to Ω(log(n)) rounds needed in the standard PRAM model. We show how to evaluate a wide class of functions using the MapReduce framework. We conclude by applying this result to show how to compute some basic algorithmic problems such as undirected s-t connectivity in the MapReduce framework. 1},
author = {Karloff, Howard and Suri, Siddharth and Vassilvitskii, Sergei},
booktitle = {Symposium on Discrete Algorithms (SODA) (2010)},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Karloff, Suri, Vassilvitskii - 2010 - A model of computation for MapReduce.pdf:pdf},
isbn = {978-0-898716-98-6},
keywords = {not printed},
mendeley-tags = {not printed},
month = jan,
pages = {938--948},
publisher = {SIAM},
title = {{A model of computation for MapReduce}},
url = {http://dl.acm.org/citation.cfm?id=1873601.1873677},
year = {2010}
}
@phdthesis{Karlsson2011,
author = {Karlsson, Lars},
pages = {1--56},
school = {Ume{\aa} University},
title = {{Scheduling of parallel matrix computations and data layout conversion for hpc and multi-core architectures}},
type = {Phd Thesis},
url = {http://www8.cs.umu.se/research/uminf/reports/2011/004/part1.pdf},
year = {2011}
}
@article{Karp2003,
author = {Karp, Richard M. and Shenker, Scott and Papadimitriou, Christos H.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Karp, Shenker, Papadimitriou - 2003 - A simple algorithm for finding frequent elements in streams and bags.pdf:pdf},
issn = {03625915},
journal = {ACM Transactions on Database Systems},
keywords = {Data stream,frequent elements},
month = mar,
number = {1},
pages = {51--55},
title = {{A simple algorithm for finding frequent elements in streams and bags}},
url = {http://dl.acm.org/citation.cfm?id=762471.762473},
volume = {28},
year = {2003}
}
@book{Kaufman1990,
abstract = {Un livre general sur le clustering.},
author = {Kaufman, L and Rousseeuw, P J},
booktitle = {Intensive Care Medicine},
chapter = {3},
doi = {10.1007/s00134-006-0431-z},
editor = {Kaufman, Leonard and Rousseeuw, Peter J.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kaufman, Rousseeuw - 1990 - Finding Groups in Data An Introduction to Cluster Analysis.pdf:pdf},
isbn = {0471878766},
issn = {03424642},
number = {1},
pages = {368},
pmid = {17093986},
publisher = {John Wiley \& Sons},
series = {Wiley Series in Probability and Mathematical Statistics},
title = {{Finding Groups in Data: An Introduction to Cluster Analysis}},
url = {http://www.amazon.com/Finding-Groups-Data-Introduction-Analysis/dp/0471878766},
volume = {33},
year = {1990}
}
@article{Kawaji2004,
author = {Kawaji, H. and Takenaka, Y. and Matsuda, H.},
doi = {10.1093/bioinformatics/btg397},
issn = {1367-4803},
journal = {Bioinformatics (Oxford, England)},
keywords = {printed},
mendeley-tags = {printed},
month = jan,
number = {2},
pages = {243--252},
title = {{Graph-based clustering for finding distant relationships in a large set of protein sequences}},
url = {http://bioinformatics.oxfordjournals.org/cgi/doi/10.1093/bioinformatics/btg397},
volume = {20},
year = {2004}
}
@inproceedings{Kestur2010,
abstract = {High Performance Computing (HPC) or scientific codes are being executed across a wide variety of computing platforms from embedded processors to massively parallel GPUs. We present a comparison of the Basic Linear Algebra Subroutines (BLAS) using double-precision floating point on an FPGA, CPU and GPU. On the CPU and GPU, we utilize standard libraries on state-of-the-art devices. On the FPGA, we have developed parameterized modular implementations for the dotproduct and Gaxpy or matrix-vector multiplication. In order to obtain optimal performance for any aspect ratio of the matrices, we have designed a high-throughput accumulator to perform an efficient reduction of floating point values. To support scalability to large data-sets, we target the BEE3 FPGA platform. We use performance and energy efficiency as metrics to compare the different platforms. Results show that FPGAs offer comparable performance as well as 2.7 to 293 times better energy efficiency for the test cases that we implemented on all three platforms.},
author = {Kestur, Srinidhi and Davis, John D and Williams, Oliver},
booktitle = {Proceedings of the 2010 IEEE Annual Symposium on VLSI},
doi = {10.1109/ISVLSI.2010.84},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kestur, Davis, Williams - 2010 - BLAS Comparison on FPGA, CPU and GPU.pdf:pdf},
isbn = {9781424473212},
keywords = {printed},
mendeley-tags = {printed},
pages = {288--293},
publisher = {IEEE Computer Society},
series = {ISVLSI '10},
title = {{BLAS Comparison on FPGA, CPU and GPU}},
url = {http://research.microsoft.com/pubs/130834/ISVLSI\_FINAL.pdf},
year = {2010}
}
@article{Khor2010,
abstract = {The author explores the application of graph colouring to biological networks, specifically protein-protein interaction (PPI) networks. First, the author finds that given similar conditions (i.e. graph size, degree distribution and clustering), fewer colours are needed to colour disassortative than assortative networks. Fewer colours create fewer independent sets which in turn imply higher concurrency potential for a network. Since PPI networks tend to be disassortative, the author suggests that in addition to functional specificity and stability proposed previously by Maslov and Sneppen (Science, 296, 2002), the disassortative nature of PPI networks may promote the ability of cells to perform multiple, crucial and functionally diverse tasks concurrently. Second, because graph colouring is closely related to the presence of cliques in a graph, the significance of node colouring information to the problem of identifying protein complexes (dense subgraphs in PPI networks), is investigated. The author finds that for PPI networks where 1-11\% of nodes participate in at least one identified protein complex, such as H. sapien, DSATUR (a well-known complete graph colouring algorithm) node colouring information can improve the quality (homogeneity and separation) of initial candidate complexes. This finding may help improve existing protein complex detection methods, and/or suggest new methods. [Includes supplementary material].},
archivePrefix = {arXiv},
arxivId = {0912.3461},
author = {Khor, S},
doi = {10.1049/iet-syb.2009.0038},
eprint = {0912.3461},
issn = {1751-8849},
journal = {IET systems biology},
keywords = {Algorithms,Biological,Color,Computer Graphics,Computer Simulation,Models,Protein Interaction Mapping,Protein Interaction Mapping: methods,Proteome,Proteome: metabolism,Signal Transduction,Signal Transduction: physiology,printed},
mendeley-tags = {printed},
month = may,
number = {3},
pages = {185--192},
pmid = {20499999},
title = {{Application of graph colouring to biological networks.}},
url = {http://arxiv.org/abs/0912.3461},
volume = {4},
year = {2010}
}
@article{Kim2011,
abstract = {SUMMARY: ReMark is a fully automatic tool for clustering orthologs by combining a Recursive and a Markov clustering (MCL) algorithms. The ReMark detects and recursively clusters ortholog pairs through reciprocal BLAST best hits between multiple genomes running software program (RecursiveClustering.java) in the first step. Then, it employs MCL algorithm to compute the clusters (score matrices generated from the previous step) and refines the clusters by adjusting an inflation factor running software program (MarkovClustering.java). This method has two key features. One utilizes, to get more reliable results, the diagonal scores in the matrix of the initial ortholog clusters. Another clusters orthologs flexibly through being controlled naturally by MCL with a selected inflation factor. Users can therefore select the fitting state of orthologous protein clusters by regulating the inflation factor according to their research interests. AVAILABILITY AND IMPLEMENTATION: Source code for the orthologous protein clustering software is freely available for non-commercial use at http://dasan.sejong.ac.kr/\~{}wikim/notice.html, implemented in Java 1.6 and supported on Windows and Linux.},
author = {Kim, Kangseok and Kim, Wonil and Kim, Sunshin},
doi = {10.1093/bioinformatics/btr259},
issn = {1367-4811},
journal = {Bioinformatics (Oxford, England)},
month = jun,
number = {12},
pages = {1731--1733},
pmid = {21546394},
title = {{ReMark: an automatic program for clustering orthologs flexibly combining a Recursive and a Markov clustering algorithms.}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/21546394},
volume = {27},
year = {2011}
}
@article{King2004,
abstract = {Understanding principles of cellular organization and function can be enhanced if we detect known and predict still undiscovered protein complexes within the cell's protein-protein interaction (PPI) network. Such predictions may be used as an inexpensive tool to direct biological experiments. The increasing amount of available PPI data necessitates an accurate and scalable approach to protein complex identification.},
author = {King, A D and Przulj, N and Jurisica, I},
doi = {10.1093/bioinformatics/bth351},
issn = {1367-4803},
journal = {Bioinformatics (Oxford, England)},
keywords = {Algorithms,Animals,Biological,Caenorhabditis elegans Proteins,Caenorhabditis elegans Proteins: metabolism,Cluster Analysis,Computer Simulation,Drosophila Proteins,Drosophila Proteins: metabolism,Models,Multienzyme Complexes,Multienzyme Complexes: metabolism,Protein Interaction Mapping,Protein Interaction Mapping: methods,Proteins,Proteins: metabolism,Saccharomyces cerevisiae Proteins,Saccharomyces cerevisiae Proteins: metabolism,Signal Transduction,Signal Transduction: physiology,printed},
mendeley-tags = {printed},
month = nov,
number = {17},
pages = {3013--3020},
pmid = {15180928},
title = {{Protein complex prediction via cost-based clustering.}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/15180928},
volume = {20},
year = {2004}
}
@book{Kogan2007,
abstract = {There is a growing need for a more automated system of partitioning data sets into groups, or clusters. For example, digital libraries and the World Wide Web continue to grow exponentially, the ability to find useful information increasingly depends on the indexing infrastructure or search engine. Clustering techniques can be used to discover natural groups in data sets and to identify abstract structures that might reside there, without having any background knowledge of the characteristics of the data. Clustering has been used in a variety of areas, including computer vision, VLSI design, data mining, bio-informatics (gene expression analysis), and information retrieval, to name just a few. This book focuses on a few of the most important clustering algorithms, providing a detailed account of these major models in an information retrieval context. The beginning chapters introduce the classic algorithms in detail, while the later chapters describe clustering through divergences and show recent research for more advanced audiences.},
author = {Kogan, Jacob},
booktitle = {Advances},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kogan - 2007 - Introduction to Clustering Large and High-Dimensional Data.pdf:pdf},
isbn = {0521617936},
pages = {222},
publisher = {Cambridge University Press},
title = {{Introduction to Clustering Large and High-Dimensional Data}},
url = {http://books.google.com/books?hl=en\&lr=\&id=AdfSSGncSlwC\&oi=fnd\&pg=PR11\&dq=Introduction+to+Clustering+Large+and+High-Dimensional+Data\&ots=r0wKzv1dT2\&sig=gF6RAHYp9mVp5LEgXOObwyqgjC0},
year = {2007}
}
@book{Kogan2006,
abstract = {Clustering is one of the most fundamental and essential data analysis techniques. Clustering can be used as an independent data mining task to discern intrinsic characteristics of data, or as a preprocessing step with the clustering results then used for classification, correlation analysis, or anomaly detection. Kogan and his co-editors have put together recent advances in clustering large and high-dimension data. Their volume addresses new topics and methods which are central to modern data analysis, with particular emphasis on linear algebra tools, opimization methods and statistical techniques. The contributions, written by leading researchers from both academia and industry, cover theoretical basics as well as application and evaluation of algorithms, and thus provide an excellent state-of-the-art overview. The level of detail, the breadth of coverage, and the comprehensive bibliography make this book a perfect fit for researchers and graduate students in data mining and in many other important related application areas.},
author = {Kogan, Jacob and Nicholas, C and Teboulle, M},
booktitle = {Springer New York},
chapter = {A Survey o},
editor = {Kogan, Jacob and Nicholas, Charles and Teboulle, Marc},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kogan, Nicholas, Teboulle - 2006 - Grouping Multidimensional Data Recent Advances in Clustering.pdf:pdf},
isbn = {354028348X},
publisher = {Springer Berlin Heidelberg},
title = {{Grouping Multidimensional Data: Recent Advances in Clustering}},
url = {http://scholar.google.com/scholar?hl=en\&btnG=Search\&q=intitle:Grouping+Multidimensional+Data:+Recent+Advances+in+Clustering\#1},
year = {2006}
}
@inproceedings{Korde2011,
abstract = {The memory of modern computer is layered in a hierarchy, top to bottom primary cache, secondary cache, main memory, virtual memory and distributed memory with more levels to come in the future. Our goal is to automatically achieve that of fine-tuned algorithms on a multi-level memory hierarchy. This automatically is because cache oblivious algorithms no knowledge about any capacity and block size of each level of the hierarchy. In this paper an efficient techniques is proposed to manage cache memory. The new technique uses block recursive structure of two types only. The algorithm is tested on famous problem of matrix multiplication. It avoids jumps and cache misses are reduced to the order of N3 √L√M.},
annote = {The quality of the article is unacceptable 
      },
author = {Korde, P. S. and Khanale, P. B.},
booktitle = {2011 IEEE Recent Advances in Intelligent Computational Systems},
doi = {10.1109/RAICS.2011.6069378},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Korde, Khanale - 2011 - Recursive storage cache memory for matrix multiplication.pdf:pdf},
isbn = {978-1-4244-9477-4},
keywords = {printed},
mendeley-tags = {printed},
month = sep,
pages = {581--586},
publisher = {IEEE},
title = {{Recursive storage cache memory for matrix multiplication}},
url = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=6069378},
year = {2011}
}
@article{Krfil1995,
abstract = {A simple algorithm for multiplication of sparse matrices is proposed. This algorithm can be easily incorporate into existing matrix multiplication routines. Behavior of the given algorithm on scalar and vector processors is discussed.},
author = {Kr\'{a}l, Daniel and Neogr\'{a}dy, Pavel and Kell\"{o}, Vladimir},
doi = {10.1016/0010-4655(94)00120-Q},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kr\'{a}l, Neogr\'{a}dy, Kell\"{o} - 1995 - Simple sparse matrix multiplication algorithm.pdf:pdf},
issn = {00104655},
journal = {Computer Physics Communications},
month = feb,
number = {2},
pages = {213--216},
title = {{Simple sparse matrix multiplication algorithm}},
url = {http://linkinghub.elsevier.com/retrieve/pii/001046559400120Q},
volume = {85},
year = {1995}
}
@inproceedings{Krishnan,
abstract = {In many applications, matrix multiplication involves different shapes of matrices. The shape of the matrix can significantly impact the performance of matrix multiplication algorithm. This paper describes extensions of the SRUMMA parallel matrix multiplication algorithm (Krishnan and Nieplocha, 2004) to improve performance of transpose and rectangular matrices. Our approach relies on a set of hybrid algorithms which are chosen based on the shape of matrices and transpose operator involved. The algorithm exploits performance characteristics of clusters and shared memory systems: it differs from the other parallel matrix multiplication algorithms by the explicit use of shared memory and remote memory access (RMA) communication rather than message passing. The experimental results on clusters and shared memory systems demonstrate consistent performance advantages over pdgemm from the ScaLAPACK parallel linear algebra package.},
author = {Krishnan, Manojkumar and Nieplocha, Jarek},
booktitle = {Proceedings. 10th International Conference on Parallel and Distributed Systems, 2004. ICPADS 2004.},
doi = {10.1109/ICPADS.2004.1316103},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Krishnan, Nieplocha - 2004 - Optimizing parallel multiplication operation for rectangular and transposed matrices.pdf:pdf},
isbn = {0-7695-2152-5},
keywords = {printed},
mendeley-tags = {printed},
pages = {257--266},
publisher = {IEEE},
title = {{Optimizing parallel multiplication operation for rectangular and transposed matrices}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1316103},
year = {2004}
}
@article{Krishnan2006,
author = {Krishnan, Manojkumar and Nieplocha, Jarek},
doi = {10.1145/1128022.1128054},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Krishnan, Nieplocha - 2006 - Memory efficient parallel matrix multiplication operation for irregular problems.pdf:pdf},
isbn = {1595933026},
journal = {Proceedings of the 3rd conference on Computing frontiers CF 06},
keywords = {distribution,global arrays,irregular,parallel linear algebra,parallel matrix multiplication,parallel programming,printed,remote memory access,srumma},
mendeley-tags = {printed},
pages = {229},
publisher = {ACM Press},
series = {CF '06},
title = {{Memory efficient parallel matrix multiplication operation for irregular problems}},
url = {http://portal.acm.org/citation.cfm?doid=1128022.1128054},
year = {2006}
}
@inproceedings{Krommer1998,
abstract = {The Numerical Algorithms Group Ltd is currently participating in the European HPCN Fourth Framework project on Parallel I ndustrial NumErical Applications and Portable Libraries PINEAPL. One of the main goals of the project is to increase the suitability of the existing NAG Parallel Library for dealing with computationally intensive industrial applications by appropriately extending the range of library routines Additionally several industrial applications are being ported onto parallel computers within the PINEAPL project by replacing sequential code sections with calls to appropriate parallel library routines A substantial part of the library material being developed is concerned with the solution of PDE problems using parallel sparse linear algebra modules These modules provide support for crucial computational tasks such as graph partitioning preconditioning and iterative solution of linear systems Additional support routines assist users in distributing and assembling the data structures used and or generated by the sparse linear algebra modules This paper provides a number of performance results which demonstrate the e ciency and scalability of core computational routines in particular the iterative solver the preconditioner and the matrix-vector multiplication routines Most of the software described in this paper has been incorporated into the recently launched Release of the PINEAPL Library},
author = {Krommer, Arnold},
booktitle = {Proceedings of Euro-Par},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Krommer - 1998 - Parallel Sparse Matrix Computations Using the PINEAPL Library A Performance Study.pdf:pdf},
pages = {804--841},
title = {{Parallel Sparse Matrix Computations Using the PINEAPL Library: A Performance Study}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.46.4923},
year = {1998}
}
@article{Kruger2003,
abstract = {In this work, the emphasis is on the development of strategies to realize techniques of numerical computing on the graphics chip. In particular, the focus is on the acceleration of techniques for solving sets of algebraic equations as they occur in numerical simulation. We introduce a framework for the implementation of linear algebra operators on programmable graphics processors (GPUs), thus providing the building blocks for the design of more complex numerical algorithms. In particular, we propose a stream model for arithmetic operations on vectors and matrices that exploits the intrinsic parallelism and efficient communication on modern GPUs. Besides performance gains due to improved numerical computations, graphics algorithms benefit from this model in that the transfer of computation results to the graphics processor for display is avoided. We demonstrate the effectiveness of our approach by implementing direct solvers for sparse matrices, and by applying these solvers to multi-dimensional finite difference equations, i.e. the 2D wave equation and the incompressible Navier-Stokes equations.},
author = {Kr{\"u}ger, Jens and Westermann, R{\"u}diger},
doi = {10.1145/882262.882363},
isbn = {1581137095},
issn = {07300301},
journal = {ACM Transactions on Graphics},
keywords = {graphics hardware,numerical simulation},
number = {3},
pages = {908--916},
publisher = {ACM},
series = {SIGGRAPH '03},
title = {{Linear algebra operators for GPU implementation of numerical algorithms}},
url = {http://portal.acm.org/citation.cfm?doid=882262.882363},
volume = {22},
year = {2003}
}
@article{Kruskal1989,
  author   = {Kruskal, Clyde P. and Rudolph, Larry and Snir, Marc},
  title    = {{Techniques for parallel manipulation of sparse matrices}},
  journal  = {Theoretical Computer Science},
  volume   = {64},
  number   = {2},
  pages    = {135--157},
  month    = may,
  year     = {1989},
  doi      = {10.1016/0304-3975(89)90058-3},
  issn     = {03043975},
  url      = {http://dx.doi.org/10.1016/0304-3975(89)90058-3},
  abstract = {New techniques are presented for the manipulation of sparse matrices on parallel MIMD computers. We consider the following problems: matrix addition, matrix multiplication, row and column permutation, matrix transpose, matrix vector multiplication, and Gaussian elimination.},
  file     = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kruskal, Rudolph, Snir - 1989 - Techniques for parallel manipulation of sparse matrices.pdf:pdf}
}
@article{kumar2010b,
abstract = {We implement an algorithm for k-clustering for small k in fixed dimensions and report experimental results here. Although the theoretical bounds on the running time are hopeless for $1 + \epsilon$ approximating k-clusters, we note that for dimensions 2 and 3, k-clustering is practical for small k (k $\leq$ 4) and simple enough shapes. For the purposes of this paper, k is a small fixed constant.},
author = {Kumar, Pankaj and Kumar, Piyush},
doi = {10.1142/S0218195910003372},
issn = {0218-1959},
journal = {International Journal of Computational Geometry \& Applications},
keywords = {clustering,core-sets,k-center,optimization},
number = {4},
pages = {431},
title = {{Almost optimal solutions to k-clustering problems}},
url = {http://www.worldscinet.com/ijcga/20/2004/S0218195910003372.html},
volume = {20},
year = {2010}
}
@phdthesis{Kumar2002,
abstract = {This thesis deals with problems at the intersection of computational geometry, optimization, graphics, and machine learning. Geometric clustering is one such problem we explore. We develop fast approximation algorithms for clustering problems like the k-center problem and minimum enclosing ellipsoid problem based on the idea of core sets. We also explore an application of the 1-center problem to recognition of people based on their hand outlines. Another problem we consider in this thesis is how to reconstruct curves and surfaces from given sample points. We show implementations of algorithms that can handle noise for reconstructing curves in two dimensions. Based on Delaunay triangulations, we develop a surface reconstructor for a given set of sample points in three dimensions. When dealing with massive data sets, it is important to consider the effect of memory hierarchies on algorithms. We explore this problem in our research on cache oblivious algorithms. We develop a practical cache oblivious algorithm to compute Delaunay triangulations of large point sets. We end the thesis with another optimization problem of approximately finding large empty convex bodies inside closed objects under various assumptions.},
address = {New York},
annote = {NOTE(review): converted from @unpublished (which requires a note field) to @phdthesis; the abstract repeatedly says "this thesis". School corrected from the "Stone Brook" typo -- verify against the title page.},
author = {Kumar, Piyush},
keywords = {Lecture Notes},
pages = {103},
school = {Stony Brook University},
title = {{Clustering and reconstructing large data sets}},
year = {2002}
}
@unpublished{Piyush2000,
abstract = {The cache oblivious model is a simple and elegant model to design algorithms that perform well in hierarchical memory models ubiquitous on current systems. This model was first formulated in [22] and has since been a topic of intense research. Analyzing and designing algorithms and data structures in this model involves not only an asymptotic analysis of the number of steps executed in terms of the input size, but also the movement of data optimally among the different levels of the memory hierarchy. This chapter is aimed as an introduction to the ``ideal-cache'' model of [22] and techniques used to design cache oblivious algorithms. The chapter also presents some experimental insights and results.},
author = {Kumar, Piyush},
note = {Unpublished manuscript},
pages = {36},
title = {{Cache Oblivious Algorithms - Theory and Practice}},
year = {2000}
}
@techreport{Kumar1999,
abstract = {We present an algorithm that provably reconstructs a curve in the framework introduced by Amenta, Bern and Eppstein. The highlights of the algorithm are: (i) it is simple, (ii) it requires a sampling density better than previously known, (iii) it can be adapted for curve reconstruction in higher dimensions straightforwardly.},
annote = {NOTE(review): the abstract describes a curve-reconstruction result (Amenta--Bern--Eppstein framework), which does not match the polygon-triangulation title -- verify title/abstract pairing against the linked PDF. Also, @techreport requires an institution field, which is missing.},
author = {Kumar, Piyush and Ghosh, Subir Kumar},
pages = {8},
title = {{A Simple Polygon Triangulation Algorithm}},
url = {http://www.compgeom.com/~piyush/papers/mypaper.pdf},
year = {1999}
}
@incollection{Kumar2003a,
abstract = {The cache oblivious model is a simple and elegant model to design algorithms that perform well in hierarchical memory models ubiquitous on current systems. This model was first formulated in [321] and has since been a topic of intense research. Analyzing and designing algorithms and data structures in this model involves not only an asymptotic analysis of the number of steps executed in terms of the input size, but also the movement of data optimally among the different levels of the memory hierarchy. This chapter is aimed as an introduction to the ``ideal-cache'' model of [321] and techniques used to design cache oblivious algorithms. The chapter also presents some experimental insights and results.},
address = {Berlin, Heidelberg},
annote = {NOTE(review): converted from @book -- the page range (193--212) and the abstract correspond to Kumar's cache-oblivious-algorithms chapter in this edited LNCS volume, not to the whole book; the three co-"authors" were the volume editors and are kept only in the editor field. Verify chapter title against the volume table of contents.},
author = {Kumar, Piyush},
booktitle = {Algorithms for Memory Hierarchies},
doi = {10.1007/3-540-36574-5},
editor = {Meyer, Ulrich and Sanders, Peter and Sibeyn, Jop},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kumar et al. - 2003 - Algorithms for Memory Hierarchies.pdf:pdf;:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kumar et al. - 2003 - Algorithms for Memory Hierarchies(2).pdf:pdf},
isbn = {978-3-540-00883-5},
keywords = {Computer Science},
month = feb,
pages = {193--212},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Cache Oblivious Algorithms}},
url = {http://www.springerlink.com/content/j05ttrjbrhujc81j/},
volume = {2625},
year = {2003}
}
@article{Kumar2003,
abstract = {We study the minimum enclosing ball (MEB) problem for sets of points or balls in high dimensions. Using techniques of second-order cone programming and "core-sets", we have developed $(1 + \epsilon)$-approximation algorithms that perform well in practice, ...},
author = {Kumar, Piyush and Mitchell, Joseph S. B. and Yildirim, E. Alper},
doi = {10.1145/996546.996548},
issn = {10846654},
journal = {Journal of Experimental Algorithmics},
pages = {1.1},
title = {{Approximate minimum enclosing balls in high dimensions using core-sets}},
url = {http://portal.acm.org/citation.cfm?doid=996546.996548},
volume = {8},
year = {2003}
}
@article{Kumar2005,
author = {Kumar, Piyush and Yildirim, E. Alper},
doi = {10.1007/s10957-005-2653-6},
issn = {00223239},
journal = {Journal of Optimization Theory and Applications},
number = {1},
pages = {1--21},
publisher = {Springer},
title = {{Minimum-Volume Enclosing Ellipsoids and Core Sets}},
url = {http://www.springerlink.com/index/10.1007/s10957-005-2653-6},
volume = {126},
year = {2005}
}
@article{Kumar2007,
abstract = {Given a set of points $\mathcal{S} = \{x^1, \ldots, x^m\} \subset \mathbb{R}^n$ and $\epsilon > 0$, we propose and analyze an algorithm for the problem of computing a $(1+\epsilon)$-approximation to the minimum-volume axis-aligned ellipsoid enclosing $\mathcal{S}$. We establish that our algorithm is polynomial for fixed $\epsilon$. In addition, the algorithm returns a small core set $\mathcal{X} \subseteq \mathcal{S}$, whose size is independent of the number of points $m$, with the property that the minimum-volume axis-aligned ellipsoid enclosing $\mathcal{X}$ is a good approximation of the minimum-volume axis-aligned ellipsoid enclosing $\mathcal{S}$. Our computational results indicate that the algorithm exhibits significantly better performance than the theoretical worst-case complexity estimate.},
author = {Kumar, Piyush and Yildirim, E. Alper},
doi = {10.1007/s10957-007-9295-9},
issn = {00223239},
journal = {Journal of Optimization Theory and Applications},
keywords = {approximation algorithms,axis aligned ellipsoids,core sets,enclosing ellipsoids},
number = {2},
pages = {211--228},
title = {{Computing Minimum-Volume Enclosing Axis-Aligned Ellipsoids}},
url = {http://www.springerlink.com/index/10.1007/s10957-007-9295-9},
volume = {136},
year = {2007}
}
@article{Kumar2009,
abstract = {Given a set $\mathcal{A}$ of $m$ points in $n$-dimensional space with corresponding positive weights, the weighted Euclidean one-center problem, which is a generalization of the minimum enclosing ball problem, involves the computation of a point $c \in \mathbb{R}^n$ that minimizes the maximum weighted Euclidean distance from $c$ to each point in $\mathcal{A}$. In this paper, given $\epsilon > 0$, we propose and analyze an algorithm that computes a $(1 + \epsilon)$-approximate solution to the weighted Euclidean one-center problem. Our algorithm explicitly constructs a small subset $\mathcal{X} \subseteq \mathcal{A}$, called an $\epsilon$-core set of $\mathcal{A}$, for which the optimal solution of the corresponding weighted Euclidean one-center problem is a close approximation to that of $\mathcal{A}$. In addition, we establish that $|\mathcal{X}|$ depends only on $\epsilon$ and on the ratio of the smallest and largest weights, but is independent of the number of points $m$ and the dimension $n$. This result subsumes and generalizes the previously known core set results for the minimum enclosing ball problem. Our algorithm computes a $(1 + \epsilon)$-approximate solution to the weighted Euclidean one-center problem for $\mathcal{A}$ in $O(mn/\epsilon)$ arithmetic operations. Our computational results indicate that the size of the $\epsilon$-core set computed by the algorithm is, in general, significantly smaller than the theoretical worst-case estimate, which contributes to the efficiency of the algorithm, especially for large-scale instances. We shed some light on the possible reasons for this discrepancy between the theoretical estimate and the practical performance.},
annote = {NOTE(review): the set symbols and the arithmetic-operation bound in the abstract were stripped during import and have been reconstructed -- verify against the published abstract.},
author = {Kumar, Piyush and Yildirim, E. Alper},
doi = {10.1287/ijoc.1080.0315},
issn = {10919856},
journal = {INFORMS Journal on Computing},
keywords = {approximation,core sets,minimum enclosing balls,weighted euclidean one center problem},
number = {4},
pages = {614--629},
title = {{An Algorithm and a Core Set Result for the Weighted Euclidean One-Center Problem}},
url = {http://joc.journal.informs.org/cgi/doi/10.1287/ijoc.1080.0315},
volume = {21},
year = {2009}
}
@inproceedings{Kurzak2007,
abstract = {Linear algebra algorithms commonly encapsulate parallelism in Basic Linear Algebra Subroutines (BLAS). This solution relies on the fork-join model of parallel execution, which may result in suboptimal performance on current and future generations of multi-core processors. To overcome the shortcomings of this approach a pipelined model of parallel execution is presented, and the idea of look ahead is utilized in order to suppress the negative effects of sequential formulation of the algorithms. Application to one-sided matrix factorizations, LU, Cholesky and QR, is described. Shared memory implementation using POSIX threads is presented.},
address = {Berlin, Heidelberg},
author = {Kurzak, Jakub and Dongarra, Jack},
booktitle = {PARA'06 Proceedings of the 8th international conference on Applied parallel computing: state of the art in scientific computing},
doi = {10.1007/978-3-540-75755-9_18},
editor = {K{\aa}gstr{\"o}m, Bo and Elmroth, Erik and Dongarra, Jack and Wa{\'s}niewski, Jerzy},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Kurzak et al. - 2007 - Implementing Linear Algebra Routines on Multi-core Processors with Pipelining and a Look Ahead.pdf:pdf},
isbn = {978-3-540-75754-2},
keywords = {Computer Science,printed},
mendeley-tags = {printed},
pages = {147--156},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Implementing Linear Algebra Routines on Multi-core Processors with Pipelining and a Look Ahead}},
url = {http://www.springerlink.com/index/10.1007/978-3-540-75755-9},
volume = {4699},
year = {2007}
}
@article{Ladner2002,
abstract = {An experimental comparison of cache aware and cache oblivious static search tree algorithms is presented. Both cache aware and cache oblivious algorithms outperform classic binary search on large data sets because of their better utilization of cache memory. Cache aware algorithms with implicit pointers perform best overall, but cache oblivious algorithms do almost as well and do not have to be tuned to the memory block size as cache aware algorithms require. Program instrumentation techniques are used to compare the cache misses and instruction counts for implementations of these algorithms.},
author = {Ladner, Richard E. and Fortna, Ray and Nguyen, Bao-Hoang},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ladner, Fortna, Nguyen - 2002 - A Comparison of Cache Aware and Cache Oblivious Static Search Trees Using Program Instrumentation.pdf:pdf},
journal = {Experimental Algorithmics},
keywords = {Cache Aware,Cache Oblivious,Search Trees,not printed},
mendeley-tags = {not printed},
pages = {78--92},
publisher = {Springer},
title = {{A Comparison of Cache Aware and Cache Oblivious Static Search Trees Using Program Instrumentation}},
url = {http://www.springerlink.com/index/8601GARJWFD4UYFC.pdf},
year = {2002}
}
@article{Lam1992,
  author        = {Lam, Tak and Tiwari, Prasoon and Tompa, Martin},
  title         = {{Trade-offs between communication and space}},
  journal       = {Journal of Computer and System Sciences},
  volume        = {45},
  number        = {3},
  pages         = {296--315},
  month         = dec,
  year          = {1992},
  doi           = {10.1016/0022-0000(92)90028-H},
  issn          = {00220000},
  url           = {http://dl.acm.org/citation.cfm?id=171523.171527},
  keywords      = {printed},
  mendeley-tags = {printed},
  file          = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Lam, Tiwari, Tompa - 1992 - Trade-offs between communication and space.pdf:pdf}
}
@inproceedings{Larsen1997,
address = {Berlin, Heidelberg},
author = {Larsen, Kim S. and Soisalon-Soininen, Eljas and Widmayer, Peter},
booktitle = {Algorithms and Data Structures (WADS '97)},
doi = {10.1007/3-540-63307-3},
editor = {Dehne, Frank and Rau-Chaplin, Andrew and Sack, J\"{o}rg-R\"{u}diger and Tamassia, Roberto},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Larsen, Soisalon-Soininen, Widmayer - 1997 - Relaxed balance through standard rotations.pdf:pdf},
isbn = {978-3-540-63307-5},
month = apr,
pages = {450--461},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Relaxed balance through standard rotations}},
url = {http://www.springerlink.com/index/10.1007/3-540-63307-3},
volume = {1272},
year = {1997}
}
@article{Latek2008,
abstract = {Background: Several different methods for contact prediction succeeded within the Sixth Critical Assessment of Techniques for Protein Structure Prediction (CASP6). The most relevant were non-local contact predictions for targets from the most difficult categories: fold recognition-analogy and new fold. Such contacts could provide valuable structural information in case a template structure cannot be found in the PDB. Results: We described comprehensive tests of the effectiveness of contact data in various aspects of de novo modeling with CABS, an algorithm which was used successfully in CASP6 by the Kolinski-Bujnicki group. We used the predicted contacts in a simple scoring function for the post-simulation ranking of protein models and as a soft bias in the folding simulations and in the fold-refinement procedure. The latter approach turned out to be the most successful. The CABS force field used in the Replica Exchange Monte Carlo simulations cooperated with the true contacts and discriminated the false ones, which resulted in an improvement of the majority of Kolinski-Bujnicki's protein models. In the modeling we tested different sets of predicted contact data submitted to the CASP6 server. According to our results, the best performing were the contacts with the accuracy balanced with the coverage, obtained either from the best two predictors only or by a consensus from as many predictors as possible. Conclusion: Our tests have shown that theoretically predicted contacts can be very beneficial for protein structure prediction. Depending on the protein modeling method, a contact data set applied should be prepared with differently balanced coverage and accuracy of predicted contacts. Namely, high coverage of contact data is important for the model ranking and high accuracy for the folding simulations.},
annote = {NOTE(review): dropped number = {36} -- it duplicated the article number already stored in pages; BMC journals publish by volume/article number without issues.},
author = {Latek, Dorota and Kolinski, Andrzej},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Latek, Kolinski - 2008 - Contact prediction in protein modeling Scoring, folding and refinement of coarse-grained models.pdf:pdf},
institution = {Faculty of Chemistry, University of Warsaw, Pasteura 1, 02-093 Warsaw, Poland. pledor@chem.uw.edu.pl},
journal = {BMC Structural Biology},
pages = {36},
publisher = {BioMed Central},
title = {{Contact prediction in protein modeling: Scoring, folding and refinement of coarse-grained models}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2527566\&tool=pmcentrez\&rendertype=abstract},
volume = {8},
year = {2008}
}
@article{Lee2010,
abstract = {As neuroimaging algorithms and technology continue to grow faster than CPU performance in complexity and image resolution, data-parallel computing methods will be increasingly important. The high performance, data-parallel architecture of modern graphical processing units (GPUs) can reduce computational times by orders of magnitude. However, its massively threaded architecture introduces challenges when GPU resources are exceeded. This paper presents optimization strategies for compute- and memory-bound algorithms for the CUDA architecture. For compute-bound algorithms, the registers are reduced through variable reuse via shared memory and the data throughput is increased through heavier thread workloads and maximizing the thread configuration for a single thread block per multiprocessor. For memory-bound algorithms, fitting the data into the fast but limited GPU resources is achieved through reorganizing the data into self-contained structures and employing a multi-pass approach. Memory latencies are reduced by selecting memory resources whose cache performance are optimized for the algorithm's access patterns. We demonstrate the strategies on two computationally expensive algorithms and achieve optimized GPU implementations that perform up to 6 faster than unoptimized ones. Compared to CPU implementations, we achieve peak GPU speedups of 129 for the 3D unbiased nonlinear image registration technique and 93 for the non-local means surface denoising algorithm.},
annote = {NOTE(review): the original title field was the placeholder ``CUDA BLAS''; the title below matches the article behind the stored doi/pmid -- verify, and complete volume/number/pages from the publisher record.},
author = {Lee, Daren and Dinov, Ivo and Dong, Bin and Gutman, Boris and Yanovsky, Igor and Toga, Arthur W},
doi = {10.1016/j.cmpb.2010.10.013},
issn = {18727565},
journal = {Computer Methods and Programs in Biomedicine},
pmid = {21159404},
title = {{CUDA Optimization Strategies for Compute- and Memory-Bound Neuroimaging Algorithms}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/21159404},
year = {2010}
}
@unpublished{Leiserson1998,
annote = {NOTE(review): converted from @article -- the imported journal field ``Technology'' and the doi field ``10.1.1.38.6620'' (a CiteSeerX document identifier, not a DOI) were auto-export artifacts and have been removed.},
author = {Leiserson, Charles E and Prokop, Harald},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Leiserson, Prokop - 1998 - A Minicourse on Multithreaded Programming.pdf:pdf},
keywords = {printed},
mendeley-tags = {printed},
note = {Unpublished manuscript},
pages = {1--13},
title = {{A Minicourse on Multithreaded Programming}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.38.6620\&amp;rep=rep1\&amp;type=pdf},
year = {1998}
}
@inproceedings{Leiserson2010,
annote = {NOTE(review): converted from @article -- the imported journal field ``Intelligence'' was an auto-export artifact; the DOI (10.1145/1810479.1810534), pages, and ACM publisher identify the SPAA 2010 proceedings.},
author = {Leiserson, Charles E and Schardl, Tao B},
booktitle = {Proceedings of the 22nd ACM Symposium on Parallelism in Algorithms and Architectures (SPAA '10)},
doi = {10.1145/1810479.1810534},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Leiserson, Schardl - 2010 - A work-efficient parallel breadth-first search algorithm (or how to cope with the nondeterminism of reducers).pdf:pdf},
isbn = {9781450300797},
keywords = {breadth first search,cilk,graph algorithms,hyperobjects,nondeterminism,parallel algorithms,reducers,threading},
pages = {303--314},
publisher = {ACM},
title = {{A work-efficient parallel breadth-first search algorithm (or how to cope with the nondeterminism of reducers)}},
url = {http://portal.acm.org/citation.cfm?id=1810534},
year = {2010}
}
@inproceedings{Leng2010,
abstract = {Many real applications are required to detect outliers in high dimensional data sets. The major difficulty of mining outliers lies on the fact that outliers are often embedded in subspaces. No efficient methods are available in general for subspace-based outlier detection. Most existing subspacebased outlier detection methods identify outliers by searching for abnormal sparse density units in subspaces. In this paper, we present a novel approach for finding outliers in the `interesting' subspaces. The interesting subspaces are strongly correlated with `good' clusters. This approach aims to group the meaningful subspaces and then identify outliers in the projected subspaces. In doing so, an extension to the subspacebased clustering algorithm is proposed so as to find the `good' subspaces, and then outliers are identified in the projected subspaces using some classical outlier detection techniques such as distance-based and density-based algorithms. Comprehensive case studies are conducted using various types of subspace clustering and outlier detection algorithms. The experimental results demonstrate that the proposed method can detect outliers effectively and efficiently in high dimensional data sets.},
address = {Chengdu},
author = {Leng, Jinsong},
booktitle = {ICCEE 2010},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Leng - 2010 - A Novel Subspace Outlier Detection Approach in High Dimensional Data Sets.pdf:pdf},
keywords = {not printed},
mendeley-tags = {not printed},
pages = {162--165},
title = {{A Novel Subspace Outlier Detection Approach in High Dimensional Data Sets}},
url = {http://ro.ecu.edu.au/ecuworks/6339},
year = {2010}
}
@book{Lesk2005,
abstract = {Written by a pioneer of the use of bioinformatics in research, the second edition of Introduction to Bioinformatics introduces the student to the power of bioinformatics as a set of scientific tools. Retaining and enhancing the rich pedagogy and lucid presentation of the first edition, this new edition explains how to access the data archives of genomes and proteins, and the kind of questions these data and tools can answer. It also discusses how to make inferences from the data archives, how to make connections among them, and how to derive useful and interesting predictions. The book is accompanied by a fully integrated companion website},
author = {Lesk, Arthur M.},
edition = {Second},
isbn = {9780199277872},
keywords = {Biology,Molecular Biology,Proteins},
language = {English},
pages = {378},
publisher = {Oxford University Press},
title = {{Introduction to Bioinformatics}},
year = {2005}
}
@article{Levitt2007,
  author      = {Levitt, Michael},
  title       = {{Growth of novel protein structural data}},
  journal     = {Proceedings of the National Academy of Sciences of the United States of America},
  volume      = {104},
  number      = {9},
  pages       = {3183--3188},
  year        = {2007},
  publisher   = {National Academy of Sciences},
  institution = {Department of Structural Biology, Stanford University School of Medicine, Stanford, CA 94305-5126, USA. michael.levitt@stanford.edu},
  url         = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=1802002\&tool=pmcentrez\&rendertype=abstract},
  keywords    = {amino acid sequence,cluster analysis,databases,protein,protein statistics \& numerical data,protein trends,proteins,proteins chemistry,proteins classification,proteomics,proteomics trends,sequence homology},
  abstract    = {Contrary to popular assumption, the rate of growth of structural data has slowed, and the Protein Data Bank (PDB) has not been growing exponentially since 1995. Reaching such a dramatic conclusion requires careful measurement of growth of novel structures, which can be achieved by clustering entry sequences, or by using a novel index to down-weight entries with a higher number of sequence neighbors. These measures agree, and growth rates are very similar for entire PDB files, clusters, and weighted chains. The overall sizes of Structural Classification of Proteins (SCOP) categories (number of families, superfamilies, and folds) appear to be directly proportional to the number of deposited PDB files. Using our weighted chain count, which is most correlated to the change in the size of each SCOP category in any time period, shows that the rate of increase of SCOP categories is actually slowing down. This enables the final size of each of these SCOP categories to be predicted without examining or comparing protein structures. In the last 3 years, structures solved by structural genomics (SG) initiatives, especially the United States National Institutes of Health Protein Structure Initiative, have begun to redress the slowing growth of the PDB. Structures solved by SG are 3.8 times less sequence-redundant than typical PDB structures. Since mid-2004, SG programs have contributed half the novel structures measured by weighted chain counts. Our analysis does not rely on visual inspection of coordinate sets: it is done automatically, providing an accurate, up-to-date measure of the growth of novel protein structural data.}
}
@techreport{LexisNexisRiskSolutions2011,
  author      = {{LexisNexis Risk Solutions}},
  title       = {{HPCC Systems: Models for Big Data}},
  institution = {LexisNexis},
  pages       = {17},
  year        = {2011},
  url         = {http://hpccsystems.com/community/white-papers},
  file        = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/LexisNexis Risk Solutions - 2011 - HPCC Systems Models for Big Data.pdf:pdf}
}
@article{Li2011,
annote = {NOTE(review): IEEE TKDE article missing volume, number, doi, and final page numbers (1--15 looks like preprint pagination) -- complete from the IEEE Xplore record.},
author = {Li, Feifei and Yao, Bin and Kumar, Piyush},
journal = {IEEE Transactions on Knowledge and Data Engineering},
pages = {1--15},
title = {{Group Enclosing Queries}},
year = {2011}
}
@article{Li1997,
  author  = {Li, J. and Skjellum, A. and Falgout, R. D.},
  title   = {{A poly-algorithm for parallel dense matrix multiplication on two-dimensional process grid topologies}},
  journal = {Concurrency: Practice and Experience},
  volume  = {9},
  number  = {5},
  pages   = {345--389},
  month   = may,
  year    = {1997},
  doi     = {10.1002/(SICI)1096-9128(199705)9:5<345::AID-CPE258>3.0.CO;2-7},
  issn    = {1040-3108},
  url     = {http://doi.wiley.com/10.1002/(SICI)1096-9128(199705)9:5<345::AID-CPE258>3.0.CO;2-7},
  file    = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Li, Skjellum, Falgout - 1997 - A poly-algorithm for parallel dense matrix multiplication on two-dimensional process grid topologies.pdf:pdf}
}
@inproceedings{Li2010,
abstract = {As the complexity of current computer architecture increases, domain-specific program generators are extensively used to implement performance portable libraries. Dynamic programming is a performance-critical kernel in many applications including engineering operations and bioinformatics. In this paper, we propose an Automatically Tuned Dynamic Programming (ATDP) to optimize performance of dynamic programming algorithm across various architectures. First, an algorithm-by-blocks for dynamic programming is designed to facilitate optimizing with well-known techniques including cache and register tiling. Further, the parameterized algorithm-by-blocks is cooperative with an auto-tuning framework and leverages a hill climbing algorithm to search the possible best program on a given platform. The experiments on two x86 processors demonstrate that (i) the generated scalar programs improve performance by over 10 times, (ii) the vector programs further speedup the scalar ones by a factor of 4 and 2 for single-precision and double-precision, respectively.},
author = {Li, Jiajia and Tan, Guangming and Chen, Mingyu},
booktitle = {2010 IEEE 16th International Conference on Parallel and Distributed Systems},
doi = {10.1109/ICPADS.2010.117},
isbn = {978-1-4244-9727-0},
month = dec,
pages = {452--459},
publisher = {IEEE},
title = {{Automatically Tuned Dynamic Programming with an Algorithm-by-Blocks}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5695635},
year = {2010}
}
@mastersthesis{Li1996,
abstract = {In this thesis, we presented several new and generalized parallel dense matrix multiplication algorithms of the form C = AB + C on two-dimensional process grid topologies. We classified these algorithms coherently into three categories according to the communication primitives used and thus offered a taxonomy for this set of related algorithms. All of the algorithms are implemented using the data-distribution-independent approach and the algorithmic compatibility condition ensures the correctness of matrix multiplication. We hypothesized that no single algorithm always achieves the best performance for multiplying matrices with different sizes on arbitrary process grids. Thus, a practical approach to resolve this dilemma is to use poly-algorithms. Both the performance models for these algorithms and the experimental results on the IBM SP2 system supported the research hypothesis. Furthermore, we provided initial heuristics for the polyalgorithmic selection for parallel dense matrix multiplication},
author = {Li, Jin},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Li - 1996 - A poly-algorithm for parallel dense matrix multiplication on two-dimensional process grid topologies.pdf:pdf},
keywords = {not printed},
mendeley-tags = {not printed},
pages = {104},
school = {Mississippi State University},
title = {{A poly-algorithm for parallel dense matrix multiplication on two-dimensional process grid topologies}},
type = {Master's Thesis},
year = {1996}
}
@inproceedings{Li2005,
author = {Li, Keqin},
booktitle = {19th IEEE International Parallel and Distributed Processing Symposium},
doi = {10.1109/IPDPS.2005.221},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Li - 2005 - Fast and Scalable Parallel Matrix Computations on Distributed Memory Systems.pdf:pdf},
isbn = {0-7695-2312-9},
month = apr,
pages = {8b},
publisher = {IEEE},
title = {{Fast and Scalable Parallel Matrix Computations on Distributed Memory Systems}},
url = {http://dl.acm.org/citation.cfm?id=1053727.1054343 http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1419823},
year = {2005}
}
@inproceedings{Li2000,
address = {London},
author = {Li, Keqin},
booktitle = {IPDPS 2000 Workshops on Parallel and Distributed Processing},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Li - 2000 - Fast and Scalable Parallel Matrix Computations with Optical Buses.pdf:pdf},
isbn = {3-540-67442-X},
keywords = {printed},
mendeley-tags = {printed},
month = may,
pages = {1053--1062},
publisher = {Springer-Verlag},
title = {{Fast and Scalable Parallel Matrix Computations with Optical Buses}},
url = {http://dl.acm.org/citation.cfm?id=645612.662687},
year = {2000}
}
@article{Keqin1998,
abstract = {We present efficient parallel matrix multiplication algorithms for linear arrays with reconfigurable pipelined bus systems (LARPBS). Such systems are able to support a large volume of parallel communication of various patterns in constant time. An LARPBS can also be reconfigured into many independent subsystems and, thus, is able to support parallel implementations of divide-and-conquer computations like Strassen's algorithm. The main contributions of the paper are as follows. We develop five matrix multiplication algorithms with varying degrees of parallelism on the LARPBS computing model; namely, MM1, MM 2, MM3, and compound algorithms C1($\epsilon$)and C2($\delta$). Algorithm C1($\epsilon$) has adjustable time complexity in sublinear level. Algorithm C2($\delta$) implies that it is feasible to achieve sublogarithmic time using $\sigma$(N3) processors for matrix multiplication on a realistic system. Algorithms MM3, C1($\epsilon$), and C2($\delta$) all have o(\&Nscr;3) cost and, hence, are very processor efficient. Algorithms MM1, MM3, and C1($\epsilon$) are general-purpose matrix multiplication algorithms, where the array elements are in any ring. Algorithms MM2 and C2($\delta$) are applicable to array elements that are integers of bounded magnitude, or floating-point values of bounded precision and magnitude, or Boolean values. Extension of algorithms MM 2 and C2($\delta$) to unbounded integers and reals are also discussed},
author = {Li, Keqin and Pan, Yi and Zheng, Si-Qing},
doi = {10.1109/71.706044},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Li, Yi, Si Qing - 1998 - Fast and processor efficient parallel matrix multiplication algorithms on a linear array with a reconfigurable pipelined bus system.pdf:pdf},
issn = {1045-9219},
journal = {IEEE Transactions on Parallel and Distributed Systems},
keywords = {printed},
mendeley-tags = {printed},
number = {8},
pages = {705--720},
title = {{Fast and processor efficient parallel matrix multiplication algorithms on a linear array with a reconfigurable pipelined bus system}},
url = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=706044},
volume = {9},
year = {1998}
}
@inproceedings{Li2004,
abstract = {Document clustering has long been an important problem in information retrieval. In this paper, we present a new clustering algorithm ASI1, which uses explicitly modeling of the subspace structure associated with each cluster. ASI simultaneously performs data reduction and subspace identification via an iterative alternating optimization procedure. Motivated from the optimization procedure, we then provide a novel method to determine the number of clusters. We also discuss the connections of ASI with various existential clustering approaches. Finally, extensive experimental results on real data sets show the effectiveness of ASI algorithm.},
author = {Li, Tao and Ma, Sheng and Ogihara, Mitsunori},
booktitle = {Proceedings of the 27th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval},
doi = {10.1145/1008992.1009031},
editor = {Jarvelin, K and Allen, J and Bruza, P and Sanderson, M},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Li, Ma, Ogihara - 2004 - Document clustering via adaptive subspace iteration.pdf:pdf},
isbn = {1581138814},
keywords = {adaptive subspace identification,alternating,document clustering,factor analysis,optimization},
pages = {218--225},
publisher = {ACM Press},
title = {{Document clustering via adaptive subspace iteration}},
url = {http://portal.acm.org/citation.cfm?doid=1008992.1009031},
year = {2004}
}
@inproceedings{Lian2010,
author = {Lian, Hao and Song, Chengsen},
booktitle = {2010 IEEE International Conference on Bioinformatics and Biomedicine},
internal-note = {Author list reconstructed from email-mangled Mendeley export (Lianbayloredu/Songbayloredu appear derived from baylor.edu addresses); verify against the published paper},
keywords = {graph clustering,interactome,networks,protein interaction,protein protein interactions},
pages = {585--589},
title = {{Decomposing Protein Interactome Networks by Graph Entropy}},
year = {2010}
}
@article{Lin2003,
abstract = {We have proposed the extended Karnaugh map representation (EKMR) scheme for multidimensional array representation. We propose two data compression schemes, EKMR compressed row/column storage (ECRS/ECCS), for multidimensional sparse arrays based on the EKMR scheme. To evaluate the proposed schemes, we compare them to the CRS/CCS schemes. Both theoretical analysis and experimental tests were conducted. In the theoretical analysis, we analyze the CRS/CCS and the ECRS/ECCS schemes in terms of the time complexity, the space complexity, and the range of their usability for practical applications. In experimental tests, we compare the compressing time of sparse arrays and the execution time of matrix-matrix addition and matrix-matrix multiplication based on the CRS/CCS and the ECRS/ECCS schemes. The theoretical analysis and experimental results show that the ECRS/ECCS schemes are superior to the CRS/CCS schemes for all the evaluated criteria, except the space complexity in some case.},
author = {Lin, Chun-Yuan and Chung, Yeh-Ching},
doi = {10.1109/TC.2003.1252859},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Lin, Chung - 2003 - Efficient data compression methods for multidimensional sparse array operations based on the ekmr scheme.pdf:pdf},
issn = {0018-9340},
journal = {IEEE Transactions on Computers},
keywords = {printed},
mendeley-tags = {printed},
month = dec,
number = {12},
pages = {1640--1646},
publisher = {IEEE Computer Society},
title = {{Efficient data compression methods for multidimensional sparse array operations based on the EKMR scheme}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1252859},
volume = {52},
year = {2003}
}
@inproceedings{Lin,
abstract = {For sparse array operations, in general, the sparse arrays are compressed by some data compression schemes in order to obtain better performance. The Compressed Row/Column Storage (CRS/CCS) schemes are the two common used data compression schemes for sparse arrays in the traditional matrix representation (TMR). When extended to higher dimensional sparse arrays, array operations using the CRS/CCS schemes usually do not perform well. We propose two data compression schemes, extended Karnaugh map representation Compressed Row/Column Storage (ECRS/ ECCS) for multi-dimensional sparse arrays based on the EKMR scheme. To evaluate the proposed schemes, both theoretical analysis and experimental tests are conducted. In theoretical analysis, we analyze CRS/CCS and ECRS/ECCS schemes in terms of the time complexity, the space complexity, and the range of their usability for practical applications. In experimental test, we compare the performance of matrix-matrix addition and matrix-matrix multiplication sparse array operations that use the CRS/CCS and ECRS/ECCS schemes. The experimental results show that sparse array operations based on the ECRS/ECCS schemes outperform those based on the CRS/CCS schemes for all test samples.},
author = {Lin, Chun-Yuan and Chung, Yeh-Ching and Liu, Jen-Shiuh},
booktitle = {First International Symposium on Cyber Worlds, 2002. Proceedings.},
doi = {10.1109/CW.2002.1180861},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Lin, Chung, Liu - 2002 - Efficient data compression methods for multi-dimensional sparse array operations.pdf:pdf},
isbn = {0-7695-1862-1},
keywords = {printed},
mendeley-tags = {printed},
pages = {62--69},
publisher = {IEEE Computer Society},
title = {{Efficient data compression methods for multi-dimensional sparse array operations}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1180861},
year = {2002}
}
@inproceedings{Lin1943,
abstract = {In this paper, we give several properties related to highly connected graph. Based on these properties, we give a redefinition of highly connected subgraph which results in an algorithm for determining whether a given graph is highly connected in linear time. Then we present a computationally efficient algorithm, called MOHCS, for mining overlapping highly connected subgraphs. We experimentally evaluate the performance of MOHCS using a variety of real and synthetic data sets. Our results show that MOHCS is effective in finding overlapping highly connected subgraphs both in computer- generated graph and yeast protein network.},
author = {Lin, Xiahong and Gao, Lin and Chen, Kefei},
booktitle = {Proceedings of the 2nd International Conference on Bioinformatics and Biomedical Engineering},
doi = {10.1109/ICBBE.2008.127},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Lin, Gao, Chen - 2008 - A Clustering Algorithm for Mining Overlapping Highly Connected Subgraphs.pdf:pdf},
internal-note = {Citation key year (1943) does not match publication year (2008); key kept to avoid breaking existing citations},
isbn = {9781424417476},
keywords = {algorithms,clustering,highly connected subgraph,minimum cut,minimum degree,not printed},
mendeley-tags = {not printed},
pages = {523--526},
publisher = {IEEE},
title = {{A Clustering Algorithm for Mining Overlapping Highly Connected Subgraphs}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4535007},
year = {2008}
}
@article{Littau2009,
abstract = {A scalable method to cluster data sets too large to fit in memory is presented. This method does not depend on random subsampling, but does scan every individual data sample in a deterministic way. The original data are represented in factored form by the product of two matrices, one or both of which is very sparse. This factored form avoids the need to multiply together these two matrices by using a variant of the Principal Direction Divisive Partitioning (PDDP) algorithm which does not depend on computing the distances between the individual samples. The resulting clustering algorithm is Piecemeal PDDP (PMPDDP), in which the original data are broken up into sections which will fit into memory and clustered. The cluster centers are used to create approximations to the original data items, and each original data item is represented by a linear combination of these centers. We evaluate the performance of PMPDDP on three real data sets, and observe that the quality of the clusters of PMPDDP is comparable to PDDP for the data sets examined.},
author = {Littau, David and Boley, Daniel},
doi = {10.1111/j.1467-8640.2009.00331.x},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Littau, Boley - 2009 - Clustering very large data sets using a low memory matrix factored representation.pdf:pdf},
issn = {08247935},
journal = {Computational Intelligence},
keywords = {PDDP,clustering,decomposition,information-retrieval,large data sets,low-memory factorization,printed},
mendeley-tags = {printed},
month = may,
number = {2},
pages = {114--135},
title = {{Clustering very large data sets using a low memory matrix factored representation}},
url = {http://doi.wiley.com/10.1111/j.1467-8640.2009.00331.x},
volume = {25},
year = {2009}
}
@unpublished{Litvinov2010,
abstract = {This is a survey paper on applications of mathematics of semirings to numerical analysis and computing. Concepts of universal algorithm and generic program are discussed. Relations between these concepts and mathematics of semirings are examined. A very brief introduction to mathematics of semirings (including idempotent and tropical mathematics) is presented. Concrete applications to optimization problems, idempotent linear algebra and interval analysis are indicated. It is known that some nonlinear problems (and especially optimization problems) become linear over appropriate semirings with idempotent addition (the so-called idempotent superposition principle). This linearity over semirings is convenient for parallel computations.},
archivePrefix = {arXiv},
arxivId = {1005.1252},
author = {Litvinov, G. L. and Maslov, V. P. and Rodionov, A. Ya. and Sobolevski, A. N.},
eprint = {1005.1252},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Litvinov et al. - 2010 - Universal algorithms, mathematics of semirings and parallel computations.pdf:pdf},
month = may,
note = {Preprint, arXiv:1005.1252},
pages = {36},
title = {{Universal algorithms, mathematics of semirings and parallel computations}},
url = {http://arxiv.org/abs/1005.1252},
year = {2010}
}
@book{Liu2008,
abstract = {Due to increasing demands for dimensionality reduction, research on feature selection has deeply and widely expanded into many fields, including computational statistics, pattern recognition, machine learning, data mining, and knowledge discovery. Highlighting current research issues, Computational Methods of Feature Selection introduces the basic concepts and principles, state-of-the-art algorithms, and novel applications of this tool.
The book begins by exploring unsupervised, randomized, and causal feature selection. It then reports on some recent results of empowering feature selection, including active feature selection, decision-border estimate, the use of ensembles with independent probes, and incremental feature selection. This is followed by discussions of weighting and local methods, such as the ReliefF family, k-means clustering, local feature relevance, and a new interpretation of Relief. The book subsequently covers text classification, a new feature selection score, and both constraint-guided and aggressive feature selection. The final section examines applications of feature selection in bioinformatics, including feature construction as well as redundancy-, ensemble-, and penalty-based feature selection.
Through a clear, concise, and coherent presentation of topics, this volume systematically covers the key concepts, underlying principles, and inventive applications of feature selection, illustrating how this powerful tool can efficiently harness massive, high-dimensional data and turn it into valuable, reliable information.},
editor = {Liu, Huan and Motoda, Hiroshi},
isbn = {978-1-58488-878-9},
pages = {411},
publisher = {Chapman \& Hall/CRC},
title = {{Computational Methods of Feature Selection}},
year = {2008}
}
@book{Luenberger2008,
abstract = {This third edition of the classic textbook in Optimization has been fully revised and updated. It comprehensively covers modern theoretical insights in this crucial computing area, and will be required reading for analysts and operations researchers in a variety of fields. The book connects the purely analytical character of an optimization problem, and the behavior of algorithms used to solve it. Now, the third edition has been completely updated with recent Optimization Methods. The book also has a new co-author, Yinyu Ye of California's Stanford University, who has written lots of extra material including some on Interior Point Methods.},
author = {Luenberger, David G. and Ye, Yinyu},
edition = {3},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Luenberger, Ye - 2008 - Linear and Nonlinear Programming.pdf:pdf;:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Luenberger, Ye - 2008 - Linear and Nonlinear Programming.djvu:djvu},
isbn = {0387745025},
pages = {508},
publisher = {Springer},
title = {{Linear and Nonlinear Programming}},
url = {http://www.amazon.com/Nonlinear-Programming-International-Operations-Management/dp/0387745025},
year = {2008}
}
@incollection{Luthy1992,
author = {L{\"u}thy, Roland and Eisenberg, David},
booktitle = {Sequence Analysis Primer},
chapter = {2},
editor = {Gribskov, Michael and Devereux, John},
isbn = {9780195098747},
pages = {61--87},
publisher = {Oxford University Press},
title = {{Protein}},
year = {1992}
}
@article{Luxburg2007,
abstract = {In recent years, spectral clustering has become one of the most popular modern clustering algorithms. It is simple to implement, can be solved efficiently by standard linear algebra software, and very often outperforms traditional clustering algorithms such as the k-means algorithm. On the first glance spectral clustering appears slightly mysterious, and it is not obvious to see why it works at all and what it really does. The goal of this tutorial is to give some intuition on those questions. We describe different graph Laplacians and their basic properties, present the most common spectral clustering algorithms, and derive those algorithms from scratch by several different approaches. Advantages and disadvantages of the different spectral clustering algorithms are discussed.},
author = {von Luxburg, Ulrike},
doi = {10.1007/s11222-007-9033-z},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Luxburg - 2007 - A tutorial on spectral clustering.pdf:pdf},
issn = {0960-3174},
journal = {Statistics and Computing},
keywords = {algorithms,connectivity,edge,eigenvectors,graph laplacian,graphs,printed,reduction,resistance distance,sparse matrices,spectral clustering},
mendeley-tags = {printed},
month = aug,
number = {4},
pages = {395--416},
title = {{A tutorial on spectral clustering}},
url = {http://www.springerlink.com/index/10.1007/s11222-007-9033-z},
volume = {17},
year = {2007}
}
@article{Macropol2009,
abstract = {We propose an efficient and biologically sensitive algorithm based on repeated random walks (RRW) for discovering functional modules, e.g., complexes and pathways, within large-scale protein networks. Compared to existing cluster identification techniques, RRW implicitly makes use of network topology, edge weights, and long range interactions between proteins.},
author = {Macropol, Kathy and Can, Tolga and Singh, Ambuj K},
doi = {10.1186/1471-2105-10-283},
issn = {1471-2105},
journal = {BMC Bioinformatics},
keywords = {Algorithms,Cluster Analysis,Computational Biology,Computational Biology: methods,Databases,Genome,Protein,Protein Interaction Mapping,Proteins,Proteins: genetics,Proteins: metabolism,Saccharomyces cerevisiae,Saccharomyces cerevisiae: genetics,Saccharomyces cerevisiae: metabolism},
month = jan,
pages = {283},
pmid = {19740439},
title = {{RRW: repeated random walks on genome-scale protein networks for local cluster discovery}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2748087\&tool=pmcentrez\&rendertype=abstract},
volume = {10},
year = {2009}
}
@unpublished{Magen2010,
abstract = {In this paper we develop algorithms for approximating matrix multiplication with respect to the spectral norm. Let AinRR ntimes m and BinRR n times p be two matrices and eps>0. We approximate the product A top B using two down-sampled sketches, tildeAinRR ttimes m and tildeBinRR ttimes p, where tll n such that normtildeA top tildeB - A top B leq eps normAnormB with high probability. We use two different sampling procedures for constructing tildeA and tildeB; one of them is done by i.i.d. non-uniform sampling rows from A and B and the other is done by taking random linear combinations of their rows. We prove bounds that depend only on the intrinsic dimensionality of A and B, that is their rank and their stable rank; namely the squared ratio between their Frobenius and operator norm. For achieving bounds that depend on rank we employ standard tools from high-dimensional geometry such as concentration of measure arguments combined with elaborate eps-net constructions. For bounds that depend on the smaller parameter of stable rank this technology itself seems weak. However, we show that in combination with a simple truncation argument is amenable to provide such bounds. To handle similar bounds for row sampling, we develop a novel matrix-valued Chernoff bound inequality which we call low rank matrix-valued Chernoff bound. Thanks to this inequality, we are able to give bounds that depend only on the stable rank of the input matrices...},
archivePrefix = {arXiv},
arxivId = {1005.2724},
author = {Magen, Avner and Zouzias, Anastasios},
eprint = {1005.2724},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Magen, Zouzias - 2010 - Low Rank Matrix-Valued Chernoff Bounds and Approximate Matrix Multiplication.pdf:pdf},
note = {Preprint, arXiv:1005.2724},
pages = {15},
title = {{Low Rank Matrix-Valued Chernoff Bounds and Approximate Matrix Multiplication}},
url = {http://arxiv.org/abs/1005.2724},
year = {2010}
}
@book{Manber1989,
abstract = {This book emphasizes the creative aspects of algorithm design by examining steps used in the process of algorithm development. The heart of the creative process lies in an analogy between proving mathematical theorems by induction and designing combinatorial algorithms. The book contains hundreds of problems and examples. It is designed to enhance the reader's problem-solving abilities and understanding of the principles behind algorithm design.},
author = {Manber, Udi},
edition = {1},
isbn = {978-0201120370},
pages = {478},
publisher = {Addison-Wesley},
title = {{Introduction to Algorithms: A Creative Approach}},
year = {1989}
}
@article{Manerikar2009,
abstract = {The problem of detecting frequent items in streaming data is relevant to many different applications across many domains. Several algorithms, diverse in nature, have been proposed in the literature for the solution of the above problem. In this paper, we review these algorithms, and we present the results of the first extensive comparative experimental study of the most prominent algorithms in the literature. The algorithms were comprehensively tested using a common test framework on several real and synthetic datasets. Their performance with respect to the different parameters (i.e., parameters intrinsic to the algorithms, and data related parameters) was studied. We report the results, and insights gained through these experiments.},
author = {Manerikar, Nishad and Palpanas, Themis},
doi = {10.1016/j.datak.2008.11.001},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Manerikar, Palpanas - 2009 - Frequent items in streaming data An experimental evaluation of the state-of-the-art.pdf:pdf},
issn = {0169023X},
journal = {Data \& Knowledge Engineering},
keywords = {data streams,frequent items,stream mining},
month = apr,
number = {4},
pages = {415--430},
title = {{Frequent items in streaming data: An experimental evaluation of the state-of-the-art}},
volume = {68},
year = {2009}
}
@inproceedings{Manku2002,
author = {Manku, Gurmeet Singh and Motwani, Rajeev},
booktitle = {Proceedings of the 28th International Conference on Very Large Data Bases (VLDB)},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Manku, Motwani - 2002 - Approximate Frequency Counts over Data Streams.pdf:pdf},
pages = {346--357},
title = {{Approximate Frequency Counts over Data Streams}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8594},
year = {2002}
}
@article{Mavinahalli2010,
abstract = {The N terminal transactivation domain of p53 is regulated by ligases and coactivator proteins. The functional conformation of this region appears to be an alpha helix which is necessary for its appropriate interactions with several proteins including MDM2 and p300. Folding simulation studies have been carried out to examine the propensity and stability of this region and are used to understand the differences between the family members with the ease of helix formation following the order p53 > p73 > p63. It is clear that hydrophobic clusters control the kinetics of helix formation, while electrostatic interactions control the thermodynamic stability of the helix. Differences in these interactions between the family members may partially account for the differential binding to, and regulation by, MDM2 (and MDMX). Phosphorylations of the peptides further modulate the stability of the helix and control associations with partner proteins.},
author = {Mavinahalli, Jagadeesh N and Madhumalar, Arumugam and Beuerman, Roger W and Lane, David P and Verma, Chandra},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Mavinahalli et al. - 2010 - Differences in the transactivation domains of p53 family members a computational study.pdf:pdf},
institution = {Bioinformatics Institute (A-STAR), Matrix, Singapore},
journal = {BMC Genomics},
number = {Suppl 1},
pages = {S5},
publisher = {BioMed Central},
title = {{Differences in the transactivation domains of p53 family members: a computational study}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2822533\&tool=pmcentrez\&rendertype=abstract},
volume = {11},
year = {2010}
}
@inproceedings{McCallum2000,
abstract = {Many important problems involve clustering large datasets. Although naive implementations of clustering are computationally expensive, there are established efficient techniques for clustering when the dataset has either (1) a limited number of clusters, (2) a low feature dimensionality, or (3) a small number of data points. However, there has been much less work on methods of efficiently clustering datasets that are large in all three ways at once, for example, having millions of data points that exist in many thousands of dimensions representing many thousands of clusters. We present a new technique for clustering these large, high-dimensional datasets. The key idea involves using a cheap, approximate distance measure to efficiently divide the data into overlapping subsets we call canopies. Then clustering is performed by measuring exact distances only between points that occur in a common canopy. Using canopies, large clustering problems that were formerly impossible become practical. Under reasonable assumptions about the cheap distance metric, this reduction in computational cost comes without any loss in clustering accuracy. Canopies can be applied to many domains and used with a variety of clustering approaches, including Greedy Agglomerative Clustering, K-means and Expectation-Maximization. We present experimental results on grouping bibliographic citations from the reference sections of research papers. Here the canopy approach reduces computation time over a traditional clustering approach by more than an order of magnitude and decreases error in comparison to a previously used algorithm by 25\%.},
author = {McCallum, Andrew and Nigam, Kamal and Ungar, Lyle H},
booktitle = {Proceedings of the Sixth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},
doi = {10.1145/347090.347123},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/McCallum, Nigam, Ungar - 2000 - Efficient clustering of high-dimensional data sets with application to reference matching.pdf:pdf},
isbn = {1581132336},
pages = {169--178},
publisher = {ACM Press},
title = {{Efficient clustering of high-dimensional data sets with application to reference matching}},
url = {http://portal.acm.org/citation.cfm?doid=347090.347123},
year = {2000}
}
@article{McColl1999,
author = {McColl, William F. and Tiskin, Alexander},
doi = {10.1007/PL00008264},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/McColl, Tiskin - 1999 - Memory-Efficient Matrix Multiplication in the BSP Model.pdf:pdf},
issn = {0178-4617},
journal = {Algorithmica},
month = jul,
number = {3-4},
pages = {287--297},
title = {{Memory-Efficient Matrix Multiplication in the BSP Model}},
url = {http://www.citeulike.org/user/dimap/article/6350211},
volume = {24},
year = {1999}
}
@inproceedings{McSherry2001,
abstract = {Problems such as bisection, graph coloring, and clique are generally believed hard in the worst case. However, they can be solved if the input data is drawn randomly from a distribution over graphs containing acceptable solutions. In this paper we show that a simple spectral algorithm can solve all three problems above in the average case, as well as a more general problem of partitioning graphs based on edge density. In nearly all cases our approach meets or exceeds previous parameters, while introducing substantial generality. We apply spectral techniques, using foremost the observation that in all of these problems, the expected adjacency matrix is a low rank matrix wherein the structure of the solution is evident.},
author = {McSherry, Frank},
booktitle = {Proceedings of the 42nd IEEE Symposium on Foundations of Computer Science (FOCS 2001)},
doi = {10.1109/SFCS.2001.959929},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/McSherry - 2001 - Spectral Partitioning of Random Graphs.pdf:pdf},
isbn = {0769511163},
keywords = {printed},
mendeley-tags = {printed},
pages = {529--537},
publisher = {IEEE Computer Society},
title = {{Spectral Partitioning of Random Graphs}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=959929},
year = {2001}
}
@book{Mehlhorn1999,
author = {Mehlhorn, Kurt and N\"{a}her, Stefan},
title = {{LEDA: A Platform for Combinatorial and Geometric Computing}},
publisher = {Cambridge University Press},
year = {1999},
isbn = {9780521563291},
pages = {1034},
abstract = {In the core computer science areas--data structures, graph and network algorithms, and computational geometry--LEDA is the first library to cover all material found in the standard textbooks. Written in C++ and freely available worldwide on a variety of hardware, the software is installed at hundreds of sites. This book, written by the main authors of LEDA, is the definitive account of how the system operates and how it can be used. The authors supply plentiful examples from a range of areas to show practical uses of the library, making the book essential for all researchers in algorithms, data structures and computational geometry.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Mehlhorn, N\"{a}her - 1999 - LEDA A Platform for Combinatorial and Geometric Computing.pdf:pdf}
}
@book{Messer1994,
abstract = {This text is designed to resolve the conflict between the abstractions of linear algebra and the needs and abilities of the students who may have dealt only briefly with the theoretical aspects of previous mathematics courses. The author recognizes that many students will at first feel uncomfortable, or at least unfamiliar, with the theoretical nature inherent in many of the topics in linear algebra. Numerous discussions of the logical structure of proofs, the need to translate terminology into notation, and suggestions about efficient ways to discover a proof are included. This text combines the many simple and elegant results of elementary linear algebra with some powerful computational techniques to demonstrate that theorectical mathematics need not be difficult, mysterious, or useless. This book is written for the second course in linear algebra (or the first course, if the instructor is receptive to this approach).},
author = {Messer, Robert},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Messer - 1994 - Linear Algebra Gateway to Mathematics.djvu:djvu},
isbn = {978-0065017281},
keywords = {Eigenvalues,Eigenvectors,Linear Algebra,Matrices,Vector Spaces},
mendeley-tags = {Eigenvalues,Eigenvectors,Linear Algebra,Matrices,Vector Spaces},
pages = {560},
publisher = {HarperCollins College Publishers},
title = {{Linear Algebra: Gateway to Mathematics}},
url = {http://www.amazon.com/Linear-Algebra-Mathematics-Robert-Messer/dp/0065017285},
year = {1994}
}
@comment{FIX(review): Metwally2005 is the ICDT 2005 Space-Saving paper (LNCS 3363); it was typed @article with the proceedings title in `journal`. Converted to @inproceedings with `booktitle`; citation key unchanged.}
@inproceedings{Metwally2005,
abstract = {We propose an integrated approach for solving both problems of finding the most popular k elements, and finding frequent elements in a data stream. Our technique is efficient and exact if the alphabet under consideration is small. In the more practical large alphabet case, our solution is space efficient and reports both top-k and frequent elements with tight guarantees on errors. For general data distributions, our top-k algorithm can return a set of k′ elements, where k′ ≈ k, which are guaranteed to be the top-k’ elements; and we use minimal space for calculating frequent elements. For realistic Zipfian data, our space requirement for the frequent elements problem decreases dramatically with the parameter of the distribution; and for top-k queries, we ensure that only the top-k elements, in the correct order, are reported. Our experiments show significant space reductions with no loss in accuracy.},
address = {Berlin, Heidelberg},
author = {Metwally, Ahmed and Agrawal, Divyakant and {El Abbadi}, Amr},
booktitle = {Database Theory - ICDT 2005},
doi = {10.1007/b104421},
editor = {Eiter, Thomas and Libkin, Leonid},
isbn = {978-3-540-24288-8},
pages = {398--412},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Efficient Computation of Frequent and Top-k Elements in Data Streams}},
url = {http://www.springerlink.com/index/10.1007/b104421},
volume = {3363},
year = {2005}
}
@comment{FIX(review): Meyer2007 -- the PARA'06 volume editors (Kagstrom, Elmroth, Dongarra, Wasniewski) had been appended to the author list by the reference manager; they are kept only in `editor`. Also fixed the duplicated "computingcomputing" in booktitle and normalised accents to BibTeX special-character form for correct sorting.}
@inproceedings{Meyer2007,
abstract = {As data repositories grow larger, it becomes increasingly difficult to transmit a large volume of data and handle several simultaneous data requests. One solution is to use a cluster of workstations for data storage. The challenge, however, is to balance the system load, since these requests may appear and change continuously. In this paper, a new method for load balancing requests on such large data sets is developed. The motivation for our method is systems where large geological data sets are rendered in real-time by a homogeneous computational cluster. The goal is to expand this system to accommodate multiple simultaneous clients. Our method assumes that the large input sets may be examined in advance, and uses simple, continuous functions to approximate the discrete costs associated with each data element. Finally, we show that partitioning a data set using our method involves very little overhead.},
address = {Berlin, Heidelberg},
author = {Meyer, Jan and Elster, Anne},
booktitle = {PARA'06 Proceedings of the 8th international conference on Applied parallel computing: state of the art in scientific computing},
doi = {10.1007/978-3-540-75755-9},
editor = {K{\aa}gstr{\"o}m, Bo and Elmroth, Erik and Dongarra, Jack and Wa{\'s}niewski, Jerzy},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Meyer et al. - 2007 - A Load Balancing Strategy for Computations on Large, Read-Only Data Sets.pdf:pdf},
isbn = {978-3-540-75754-2},
keywords = {Computer Science,printed},
mendeley-tags = {printed},
pages = {198--207},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{A Load Balancing Strategy for Computations on Large, Read-Only Data Sets}},
url = {http://www.springerlink.com/content/u4uu7u732652641j/},
volume = {4699},
year = {2007}
}
@comment{White paper from the HPCC Systems community site; pages gives the report length.}
@techreport{Middleton2011a,
  author      = {Middleton, Anthony M.},
  institution = {LexisNexis Risk Solutions},
  pages       = {45},
  title       = {{HPCC Systems: Introduction to HPCC (High-Performance Computing Cluster)}},
  url         = {http://hpccsystems.com/community/white-papers},
  year        = {2011}
}
@comment{FIX(review): Mirkin2005 -- the publisher's city ("New York") had been stored in `booktitle`, a field @book does not use; moved to `address`. Edition normalised to the ordinal word form classic BibTeX styles expect.}
@book{Mirkin2005,
abstract = {Often considered more as an art than a science, the field of clustering has been dominated by learning through examples and by techniques chosen almost through trial-and-error. Even the most popular clustering methods--K-Means for partitioning the data set and Ward's method for hierarchical clustering--have lacked the theoretical attention that would establish a firm relationship between the two methods and relevant interpretation aids. Rather than the traditional set of ad hoc techniques, Clustering for Data Mining: A Data Recovery Approach presents a theory that not only closes gaps in K-Means and Ward methods, but also extends them into areas of current interest, such as clustering mixed scale data and incomplete clustering. The author suggests original methods for both cluster finding and cluster description, addresses related topics such as principal component analysis, contingency measures, and data visualization, and includes nearly 60 computational examples covering all stages of clustering, from data pre-processing to cluster validation and results interpretation. This author's unique attention to data recovery methods, theory-based advice, pre- and post-processing issues that are beyond the scope of most texts, and clear, practical instructions for real-world data mining make this book ideally suited for virtually all purposes: for teaching, for self-study, and for professional reference.},
address = {New York},
author = {Mirkin, Boris},
edition = {First},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Mirkin - 2005 - Clustering for Data Mining - A Data Recovery Approach.pdf:pdf},
isbn = {1584885343},
pages = {296},
publisher = {Chapman and Hall/CRC},
series = {Computer Science and Data Analysis Series},
title = {{Clustering for Data Mining - A Data Recovery Approach}},
url = {http://www.amazon.com/dp/1584885343},
year = {2005}
}
@comment{FIX(review): Mitchell2003 is a workshop (ALENEX 2003) paper, not a journal article; converted @article with the venue in `journal` to @inproceedings with `booktitle`. Added the missing period to the first initial. The abstract's "\#" characters look like a garbled epsilon from extraction -- left as-is since the exact exponent cannot be reconstructed from here.}
@inproceedings{Mitchell2003,
abstract = {We study the minimum enclosing ball (MEB) problem for sets of points or balls in high dimensions. Using techniques of second-order cone programming and "coresets ", we have developed (1 + \#)-approximation algorithms that perform well in practice, especially for very high dimensions, in addition to having provable guarantees. We prove the existence of core-sets of size O(1/\#), improving the previous bound of O(1/\# ), and we study empirically how the core-set size grows with dimension. We show that our algorithm, which is simple to implement, results in fast computation of nearly optimal solutions for point sets in much higher dimension than previously computable using exact techniques.},
author = {Mitchell, Joseph S. B. and Yildirim, E. Alper},
booktitle = {Workshop on Algorithm Engineering and Experiments},
pages = {1--19},
title = {{Computing Core-Sets and Approximate Smallest Enclosing HyperSpheres in High Dimensions}},
year = {2003}
}
@comment{Journal article; key kept as "Mitra" (no year suffix) so existing cite commands keep working.}
@article{Mitra,
  author   = {Mitra, Pradipta},
  file     = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Mitra - 2009 - Entrywise Bounds for Eigenvectors of Random Graphs.pdf:pdf},
  journal  = {The Electronic Journal of Combinatorics},
  keywords = {matrices},
  number   = {1},
  pages    = {R131},
  title    = {{Entrywise Bounds for Eigenvectors of Random Graphs}},
  url      = {http://apps.webofknowledge.com/full\_record.do?product=WOS\&search\_mode=Refine\&qid=6\&SID=V1dGdjIgFanpbLh1Kbe\&page=1\&doc=7},
  volume   = {16},
  year     = {2009}
}
@comment{NOTE(review): Mitra2003 -- the abstract spans several raw lines of publisher bullet text; BibTeX collapses the line breaks, so this is harmless. Fields are otherwise consistent for a @book.}
@book{Mitra2003,
abstract = {First title to ever present soft computing approaches and their application in data mining, along with the traditional hard-computing approaches
Addresses the principles of multimedia data compression techniques (for image, video, text) and their role in data mining
Discusses principles and classical algorithms on string matching and their role in data mining},
author = {Mitra, Sushmita and Acharya, Tinku},
isbn = {9780471460541},
language = {English},
pages = {424},
publisher = {John Wiley \& Sons, Inc.},
title = {{Data Mining: Multimedia, Soft Computing, and Bioinformatics}},
year = {2003}
}
@comment{NOTE(review): Mitzenmacher2011 -- arXiv preprint recorded with this file's eprint convention (archivePrefix/arxivId/eprint). pages = {22} appears to be a page count rather than a page range -- TODO confirm against the arXiv record 1102.5540 before relying on it.}
@article{Mitzenmacher2011,
abstract = {The Hierarchical Heavy Hitters problem extends the notion of frequent items to data arranged in a hierarchy. This problem has applications to network traffic monitoring, anomaly detection, and DDoS detection. We present a new streaming approximation algorithm for computing Hierarchical Heavy Hitters that has several advantages over previous algorithms. It improves on the worst-case time and space bounds of earlier algorithms, is conceptually simple and substantially easier to implement, offers improved accuracy guarantees, is easily adopted to a distributed or parallel setting, and can be efficiently implemented in commodity hardware such as ternary content addressable memory (TCAMs). We present experimental results showing that for parameters of primary practical interest, our two-dimensional algorithm is superior to existing algorithms in terms of speed and accuracy, and competitive in terms of space, while our one-dimensional algorithm is also superior in terms of speed and accuracy for a more limited range of parameters.},
archivePrefix = {arXiv},
arxivId = {1102.5540},
author = {Mitzenmacher, Michael and Steinke, Thomas and Thaler, Justin},
eprint = {1102.5540},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Mitzenmacher, Steinke, Thaler - 2011 - Hierarchical Heavy Hitters with the Space Saving Algorithm.5540v2:5540v2;:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Mitzenmacher, Steinke, Thaler - 2011 - Hierarchical Heavy Hitters with the Space Saving Algorithm.pdf:pdf},
month = feb,
pages = {22},
title = {{Hierarchical Heavy Hitters with the Space Saving Algorithm}},
url = {http://arxiv.org/abs/1102.5540},
year = {2011}
}
@comment{FIX(review): Mocian2009 -- `journal = {horatiumociancom}` was a mangled personal-site domain, not a journal; this is a student survey report, so the entry is now a @techreport carried by the existing `institution` field (name corrected to "Imperial College London"). Citation key unchanged.}
@techreport{Mocian2009,
annote = {Not relevant. Deals with more classical clustering techniques},
author = {Mocian, Horatiu},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Mocian - 2009 - Survey of Distributed Clustering Techniques.pdf:pdf},
institution = {Imperial College London},
keywords = {printed},
mendeley-tags = {printed},
pages = {35},
title = {{Survey of Distributed Clustering Techniques}},
url = {http://scholar.google.com/scholar?start=20\&hl=en\&cites=3804309015039168186\#4},
year = {2009}
}
@comment{FIX(review): Modi1988 -- the isbn field held four space-separated ISBNs (ISBN-10/13 pairs for two bindings); kept the first pair's ISBN-10, hyphenated (checksum verified: 0-19-859655-3). Added the missing period to the middle initial.}
@book{Modi1988,
address = {Oxford},
author = {Modi, Jagdish J.},
isbn = {0-19-859655-3},
publisher = {Clarendon Press},
title = {{Parallel algorithms and matrix computation}},
year = {1988}
}
@comment{FIX(review): Mohanty2010 -- the arXiv class was stuffed into `volume = {cs.DS}`; replaced with this file's eprint convention (archivePrefix/arxivId/eprint, cf. Pagh2011 and Mitzenmacher2011) and normalised the all-caps institution name.}
@article{Mohanty2010,
abstract = {We analyse some QR decomposition algorithms, and show that the I/O complexity of the tile based algorithm is asymptotically the same as that of matrix multiplication. This algorithm, we show, performs the best when the tile size is chosen so that exactly one tile fits in the main memory. We propose a constant factor improvement, as well as a new recursive cache oblivious algorithm with the same asymptotic I/O complexity. We design Hessenberg, tridiagonal, and bidiagonal reductions that use banded intermediate forms, and perform only asymptotically optimal numbers of I/Os; these are the first I/O optimal algorithms for these problems. In particular, we show that known slab based algorithms for two sided reductions all have suboptimal asymptotic I/O performances, even though they have been reported to do better than the traditional algorithms on the basis of empirical evidence. We propose new tile based variants of multishift QR and QZ algorithms that under certain conditions on the number of shifts, have better seek and I/O complexities than all known variants. We show that techniques like rescheduling of computational steps, appropriate choosing of the blocking parameters and incorporating of more matrix-matrix operations, can be used to improve the I/O and seek complexities of matrix computations.},
archivePrefix = {arXiv},
arxivId = {1006.1307},
author = {Mohanty, Sraban Kumar},
eprint = {1006.1307},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Mohanty - 2010 - IO Efficient Algorithms for Matrix Computations.pdf:pdf},
institution = {Indian Institute of Technology Guwahati},
journal = {arXiv},
title = {{I/O Efficient Algorithms for Matrix Computations}},
url = {http://arxiv.org/abs/1006.1307},
year = {2010}
}
@comment{FIX(review): Monagan2009 is an ISSAC '09 proceedings paper, not a journal article: converted to @inproceedings with `booktitle` (tidied from the scraped "ISSAC 09" string). The month had been misfiled as number = {July}; now month = jul. pages = {263} is only a start page -- TODO look up the end page in the ACM DL record for DOI 10.1145/1576702.1576739.}
@inproceedings{Monagan2009,
author = {Monagan, Michael B. and Pearce, Roman},
booktitle = {Proceedings of the 2009 International Symposium on Symbolic and Algebraic Computation - ISSAC '09},
doi = {10.1145/1576702.1576739},
isbn = {9781605586090},
month = jul,
pages = {263},
publisher = {ACM Press},
title = {{Parallel Sparse Polynomial Multiplication}},
url = {http://portal.acm.org/citation.cfm?doid=1576702.1576739},
year = {2009}
}
@comment{NOTE(review): Muramatsu2010 -- well-formed @article: month uses the jan..dec macro form, pages use a proper -- range; the DOI and the IEEE url appear to reference the same record (not independently verified).}
@article{Muramatsu2010,
abstract = {The aim of this paper is to prove the achievability of rate regions for several coding problems by using sparse matrices (with logarithmic column degree) and maximum-likelihood (ML) coding. These problems are the Gel'fand-Pinsker problem, the Wyner-Ziv problem, and the one-helps-one problem (source coding with partial side information at the decoder). To this end, the notion of a hash property for an ensemble of functions is introduced and it is proved that an ensemble of q-ary sparse matrices satisfies the hash property. Based on this property, it is proved that the rate of codes using sparse matrices and ML coding can achieve the optimal rate.},
author = {Muramatsu, Jun and Miyake, Shigeki},
doi = {10.1109/TIT.2010.2043781},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Muramatsu, Miyake - 2010 - Hash Property and Coding Theorems for Sparse Matrices and Maximum-Likelihood Coding.pdf:pdf},
issn = {0018-9448},
journal = {IEEE Transactions on Information Theory},
keywords = {printed},
mendeley-tags = {printed},
month = may,
number = {5},
pages = {2143--2167},
title = {{Hash Property and Coding Theorems for Sparse Matrices and Maximum-Likelihood Coding}},
url = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=5452198},
volume = {56},
year = {2010}
}
@comment{FIX(review): Myaeng2005 is an editor-only LNCS proceedings volume; @proceedings is the specific type for a whole proceedings, so the entry is no longer a @book. All fields and the citation key are unchanged.}
@proceedings{Myaeng2005,
address = {Berlin, Heidelberg},
doi = {10.1007/b106653},
editor = {Myaeng, Sung Hyon and Zhou, Ming and Wong, Kam-Fai and Zhang, Hong-Jiang},
isbn = {978-3-540-25065-4},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Information Retrieval Technology}},
url = {http://www.springerlink.com/index/10.1007/b106653},
volume = {3411},
year = {2005}
}
@comment{Unpublished report on CiteSeerX; key kept as "Narayan" (no year suffix) so existing cite commands keep working.}
@misc{Narayan,
  author = {Narayan, Sumit and Thamarakuzhi, Ajithkumar},
  file   = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Narayan, Thamarakuzhi - 2009 - Performance Characterization of Matrix Multiplication on SGI Altix 3700.pdf:pdf;:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Narayan, Thamarakuzhi - 2009 - Performance Characterization of Matrix Multiplication on SGI Altix 3700(2).pdf:pdf},
  title  = {{Performance Characterization of Matrix Multiplication on SGI Altix 3700}},
  url    = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.76.6446},
  year   = {2009}
}
@comment{FIX(review): Natvig2007 -- same Mendeley defect as Meyer2007: the PARA'06 volume editors had been appended to the author list; they are kept only in `editor`. Fixed the duplicated "computingcomputing" in booktitle and normalised accents to BibTeX special-character form.}
@inproceedings{Natvig2007,
abstract = {HPC users frequently develop and run their MPI programs without optimizing communication, leading to poor performance on clusters connected with regular Gigabit Ethernet. Unfortunately, optimizing communication patterns will often decrease the clarity and ease of modification of the code, and users desire to focus on the application problem and not the tool used to solve it. In this paper, our new method for automatically optimizing any application’s communication is presented. All MPI calls are intercepted by a library we inject into the application. Memory associated with MPI requests is protected using hardware supported memory protection. The request can then continue in the background as an asynchronous operation while the application is allowed to continue as if the request is finished. Once the data is accessed by the application, a page fault will occur, and our injected library will wait for the background transfer to finish before allowing the application to continue. Performance close to that of manual optimization are observed on our test-cases when run on Gigabit Ethernet clusters.},
address = {Berlin, Heidelberg},
author = {Natvig, Thorvald and Elster, Anne},
booktitle = {PARA'06 Proceedings of the 8th international conference on Applied parallel computing: state of the art in scientific computing},
doi = {10.1007/978-3-540-75755-9},
editor = {K{\aa}gstr{\"o}m, Bo and Elmroth, Erik and Dongarra, Jack and Wa{\'s}niewski, Jerzy},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Natvig et al. - 2007 - Automatic and Transparent Optimizations of an Application’s MPI Communication.pdf:pdf},
isbn = {978-3-540-75754-2},
keywords = {Computer Science,printed},
mendeley-tags = {printed},
pages = {208--217},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Automatic and Transparent Optimizations of an Application’s MPI Communication}},
url = {http://www.springerlink.com/content/77331158x3613222/},
volume = {4699},
year = {2007}
}
@comment{FIX(review): Nemethy1977 -- repaired the HTML entity "\&amp;" and the lowercased "anfinsen" in the abstract, put the author accent in BibTeX special-character form for correct sorting, added periods to the initials, and dropped the stray trailing period inside the braced title. publisher = {Alan R. Liss, Inc.} looks inconsistent with the Cambridge url -- TODO verify before relying on it.}
@article{Nemethy1977,
abstract = {This review describes recent advances in studies on the stabilities of the three-dimensional structures of proteins and on the processes leading to the formation of these structures. The term will be used here to denote the process of the conversion of an open polypeptide chain into the unique three-dimensional conformation of the native protein. Experimental and theoretical aspects of protein folding have been reviewed by Anfinsen \& Scheraga (1975). In the present article, we emphasize advances made since the writing of that review, together with a brief summary of the background of recent studies.},
author = {N{\'e}methy, G. and Scheraga, H. A.},
journal = {Quarterly Reviews of Biophysics},
number = {3},
pages = {239--252},
pmid = {335427},
publisher = {Alan R. Liss, Inc.},
title = {{Protein folding}},
url = {http://www.journals.cambridge.org/abstract\_S0033583500002936},
volume = {10},
year = {1977}
}
@comment{FIX(review): corporate author was misspelled "NexisNexis"; corrected to "LexisNexis Risk Solutions" to match the institution in Middleton2011a. The citation key keeps the old spelling so existing cite commands still resolve.}
@misc{NexisNexisRiskSolutions2012,
author = {{LexisNexis Risk Solutions}},
keywords = {Big Data,Enterprise Control Language,High Performance Computing Cluster},
title = {{HPCC Systems}},
url = {http://hpccsystems.com/},
year = {2012}
}
@comment{FIX(review): Ngo2006 -- the `type` field identifies this as a master's thesis (SJSU "Master's Theses and Graduate Research" series), so the entry type is corrected from @phdthesis to @mastersthesis; all fields and the key are unchanged.}
@mastersthesis{Ngo2006,
abstract = {The Web contains massive amount of documents from across the globe to the point where it has become impossible to classify them manually. This project’s goal is to find a new method for clustering documents that are as close to humans’ classification as possible and at the same time to reduce the size of the documents. This project uses a combination of Latent Semantic Indexing (LSI) with Singular Value Decomposition (SVD) calculation as well as Support Vector Machine (SVM) classification. With SVD, data sets are decomposed and can be truncated to reduce the data sets size. The reduced data set will then be used to cluster. With SVM, clustered data set is used for training to allow new data to be classified based on SVM’s prediction. The project’s result show that the method of combining SVD and SVM is able to reduce data size and classifies documents reasonably compared to humans’ classification.},
author = {Ngo, Tam P.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ngo - 2006 - Clustering High Dimensional Data Using SVM.pdf:pdf},
pages = {50},
school = {San Jose State University},
title = {{Clustering High Dimensional Data Using SVM}},
type = {Master's Theses and Graduate Research},
year = {2006}
}
@comment{FIX(review): Noel2002 -- capitalised the publisher ("springer" -> "Springer") and added periods/spaces to the bare author initials. The keywords field contains truncated scraper tokens ("cites.gradu ... resear") -- left untouched since the intended values cannot be reconstructed from here.}
@incollection{Noel2002,
author = {Noel, S. and Raghavan, V. and Chu, C. H. H.},
booktitle = {Clustering and Information Retrieval},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Noel, Raghavan, Chu - 2002 - Document clustering, visualization, and retrieval via link mining.pdf:pdf},
keywords = {research.clustering cites.gradu research.ir resear},
pages = {161--193},
publisher = {Springer},
series = {Network Theory and Applications},
title = {{Document clustering, visualization, and retrieval via link mining}},
volume = {11},
year = {2002}
}
@comment{FIX(review): Olman2009 -- the journal name was scraper-garbled ("IEEEACM transactions ... IEEE ACM"); restored to the canonical "IEEE/ACM Transactions on Computational Biology and Bioinformatics". Completed the truncated page range (344--52 -> 344--352) and dropped the stray trailing period inside the braced title.}
@article{Olman2009,
abstract = {Large sets of bioinformatical data provide a challenge in time consumption while solving the cluster identification problem, and that is why a parallel algorithm is so needed for identifying dense clusters in a noisy background. Our algorithm works on a graph representation of the data set to be analyzed. It identifies clusters through the identification of densely intraconnected subgraphs. We have employed a minimum spanning tree (MST) representation of the graph and solve the cluster identification problem using this representation. The computational bottleneck of our algorithm is the construction of an MST of a graph, for which a parallel algorithm is employed. Our high-level strategy for the parallel MST construction algorithm is to first partition the graph, then construct MSTs for the partitioned subgraphs and auxiliary bipartite graphs based on the subgraphs, and finally merge these MSTs to derive an MST of the original graph. The computational results indicate that when running on 150 CPUs, our algorithm can solve a cluster identification problem on a data set with 1,000,000 data points almost 100 times faster than on single CPU, indicating that this program is capable of handling very large data clustering problems in an efficient manner. We have implemented the clustering algorithm as the software CLUMP.},
author = {Olman, Victor and Mao, Fenglou and Wu, Hongwei and Xu, Ying},
doi = {10.1109/TCBB.2007.70272},
issn = {15579964},
journal = {IEEE/ACM Transactions on Computational Biology and Bioinformatics},
keywords = {algorithms,automated,automated methods,cluster analysis,computational biology,computational biology methods,databases,genetic,linear models,multigene family,pattern recognition,reproducibility results,software,systems integration},
number = {2},
pages = {344--352},
pmid = {19407357},
publisher = {IEEE Computer Society},
title = {{Parallel clustering algorithm for large data sets with applications in bioinformatics}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/19407357},
volume = {6},
year = {2009}
}
@comment{FIX(review): Ostlin2003 -- restored the missing diacritic in the first author's surname ({\"O}stlin, per the STOC '03 byline), using BibTeX special-character form. pages = {622} is only a start page -- TODO confirm the end page in the ACM DL record. Citation key unchanged.}
@inproceedings{Ostlin2003,
address = {New York, New York, USA},
author = {{\"O}stlin, Anna and Pagh, Rasmus},
booktitle = {Proceedings of the thirty-fifth ACM symposium on Theory of computing - STOC '03},
doi = {10.1145/780542.780633},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Ostlin, Pagh - 2003 - Uniform hashing in constant time and linear space.pdf:pdf},
isbn = {1581136749},
keywords = {data structures,hash function,uniform hashing},
month = jun,
pages = {622},
publisher = {ACM Press},
title = {{Uniform hashing in constant time and linear space}},
url = {http://dl.acm.org/citation.cfm?id=780542.780633},
year = {2003}
}
@comment{FIX(review): Pagh2011 -- the abstract was extraction-garbled: "\$\_1,h\_2" should read "h\_1,h\_2", and the escaped brace opened in "c\_\{(h\_1(i)+h\_2(j)) mod b" was never closed, leaving a compensating stray "}" at the end of the field. Both repaired; everything else verbatim.}
@article{Pagh2011,
abstract = {Motivated by the problems of computing sample covariance matrices, and of transforming a collection of vectors to a basis where they are sparse, we present a simple algorithm that computes an approximation of the product of two n-by-n real matrices A and B. Let ||AB||\_F denote the Frobenius norm of AB, and b be a parameter determining the time/accuracy trade-off. Given 2-wise independent hash functions h\_1,h\_2: [n] -> [b], and s\_1,s\_2: [n] -> \{-1,+1\} the algorithm works by first "compressing" the matrix product into the polynomial p(x) = sum\_\{k=1\}\^{}n (sum\_\{i=1\}\^{}n A\_\{ik\} s\_1(i) x\^{}\{h\_1(i)\}) (sum\_\{j=1\}\^{}n B\_\{kj\} s\_2(j) x\^{}\{h\_2(j)\}) Using FFT for polynomial multiplication, we can compute c\_0,...,c\_\{b-1\} such that sum\_i c\_i x\^{}i = (p(x) mod x\^{}b) + (p(x) div x\^{}b) in time \backslash\~{}O(n\^{}2+ n b). An unbiased estimator of (AB)\_\{ij\} with variance at most ||AB||\_F\^{}2 / b can then be computed as: C\_\{ij\} = s\_1(i) s\_2(j) c\_\{(h\_1(i)+h\_2(j)) mod b\}. Our approach also leads to an algorithm for computing AB exactly, whp., in time \backslash\~{}O(N + nb) in the case where A and B have at most N nonzero entries, and AB has at most b nonzero entries. Also, we use error-correcting codes in a novel way to recover significant entries of AB in near-linear time.},
archivePrefix = {arXiv},
arxivId = {1108.1320},
author = {Pagh, Rasmus},
eprint = {1108.1320},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Pagh - 2011 - Compressed Matrix Multiplication.pdf:pdf},
journal = {Proceedings of ACM Innovations in Theoretical Computer Science},
keywords = {Computation,Data Structures and Algorithms,Numerical Analysis,printed},
mendeley-tags = {printed},
month = aug,
title = {{Compressed Matrix Multiplication}},
url = {http://arxiv.org/abs/1108.1320},
year = {2011}
}
@comment{FIX(review): Parsons2004 -- expanded the abbreviated journal name ("SIGKDD Explor Newsl" -> "SIGKDD Explorations Newsletter"), split the publisher's city out of the publisher field into `address`, and dropped the bogus `institution = {Citeseer}` scraper artifact (an ACM journal article has no institution).}
@article{Parsons2004,
abstract = {Subspace clustering is an extension of traditional clustering that seeks to find clusters in different subspaces within a dataset. Often in high dimensional data, many dimensions are irrelevant and can mask existing clusters in noisy data. Feature selection removes irrelevant and redundant dimensions by analyzing the entire dataset. Subspace clustering algorithms localize the search for relevant dimensions allowing them to find clusters that exist in multiple, possibly overlapping subspaces. There are two major branches of subspace clustering based on their search strategy. Top-down algorithms find an initial clustering in the full set of dimensions and evaluate the subspaces of each cluster, iteratively improving the results. Bottom-up approaches find dense regions in low dimensional spaces and combine them to form clusters. This paper presents a survey of the various subspace clustering algorithms along with a hierarchy organizing the algorithms by their defining characteristics. We then compare the two main approaches to subspace clustering using empirical scalability and accuracy tests and discuss some potential applications where subspace clustering could be particularly useful.},
address = {New York, NY, USA},
author = {Parsons, Lance and Haque, Ehtesham and Liu, Huan},
doi = {10.1145/1007730.1007731},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Parsons, Haque, Liu - 2004 - Subspace clustering for high dimensional data a review.pdf:pdf},
issn = {19310145},
journal = {SIGKDD Explorations Newsletter},
number = {1},
pages = {90--105},
publisher = {ACM},
title = {{Subspace clustering for high dimensional data: a review}},
url = {http://portal.acm.org/citation.cfm?doid=1007730.1007731},
volume = {6},
year = {2004}
}
@comment{FIX(review): Peng2006 is an IEEE conference paper (ICSSSM 2006) typed @article with the conference name in `journal`; converted to @inproceedings with `booktitle`. Dropped the garbled `number = {Dm}` scraper artifact and fixed the publisher casing ("Ieee" -> "IEEE").}
@inproceedings{Peng2006,
author = {Peng, Yi and Kou, Gang and Chen, Zhengxin and Shi, Yong},
booktitle = {2006 International Conference on Service Systems and Service Management},
doi = {10.1109/ICSSSM.2006.320794},
isbn = {1424404517},
keywords = {content analysis,data mining field,document clustering},
pages = {1653--1659},
publisher = {IEEE},
title = {{Recent trends in Data Mining (DM): Document Clustering of DM Publications}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4114740},
year = {2006}
}
@comment{FIX(review): Pham2007 -- added periods to the bare author initials (D T -> D. T., S -> S., M -> M.). journal = {Manufacturing Systems} looks like scraper metadata rather than the real venue -- TODO verify the venue before relying on it.}
@article{Pham2007,
abstract = {Clustering is concerned with partitioning a data set into homogeneous groups. One of the most popular clustering methods is k-means clustering because of its simplicity and computational efficiency. K-means clustering involves search and optimization. The main problem with this clustering method is its tendency to converge to local optima. The authors’ team have developed a new population based search algorithm called the Bees Algorithm that is capable of locating near optimal solutions efficiently. This paper proposes a clustering method that integrates the simplicity of the k-means algorithm with the capability of the Bees Algorithm to avoid local optima. The paper presents test results to demonstrate the efficacy of the proposed algorithm.},
author = {Pham, D. T. and Otri, S. and Afify, Ashraf and Mahmuddin, M. and Al-Jabbouli, Hasan},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Pham et al. - 2007 - Data Clustering Using the Bees Algorithm.pdf:pdf},
journal = {Manufacturing Systems},
title = {{Data Clustering Using the Bees Algorithm}},
url = {http://bees-algorithm.com/modules/2/6.pdf},
year = {2007}
}
@comment{FIX(review): Pietracaprina2011 is an arXiv preprint (eprint 1111.2228); the `journal = {Science}` field was a Mendeley scraper artifact and has been removed -- the entry already carries the file's arXiv convention (archivePrefix/arxivId/eprint) and the arXiv url.}
@article{Pietracaprina2011,
abstract = {This work explores fundamental modeling and algorithmic issues arising in the well-established MapReduce framework. First, we formally specify a computational model for MapReduce which captures the functional flavor of the paradigm by allowing for a flexible use of parallelism. Indeed, the model diverges from a traditional processor-centric view by featuring parameters which embody only global and local memory constraints, thus favoring a more data-centric view. Second, we apply the model to the fundamental computation task of matrix multiplication presenting upper and lower bounds for both dense and sparse matrix multiplication, which highlight interesting tradeoffs between space and round complexity. Finally, building on the matrix multiplication results, we derive further space-round tradeoffs on matrix inversion and matching.},
archivePrefix = {arXiv},
arxivId = {1111.2228v1},
author = {Pietracaprina, Andrea and Pucci, Geppino and Riondato, Matteo and Silvestri, Francesco and Upfal, Eli},
eprint = {1111.2228v1},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Pietracaprina et al. - 2011 - Space-Round Tradeoffs for MapReduce Computations.pdf:pdf},
keywords = {algorithms distributed computing,computing,parallel algorithms,parallel complexity theory,printed,rithms high performance},
mendeley-tags = {printed},
title = {{Space-Round Tradeoffs for MapReduce Computations}},
url = {http://arxiv.org/abs/1111.2228},
year = {2011}
}
@article{Pott2010,
abstract = {The prognostic impact of minimal residual disease (MRD) was analyzed in 259 patients with mantle cell lymphoma (MCL) treated within 2 randomized trials of the European MCL Network (MCL Younger and MCL Elderly trial). After rituximab-based induction treatment, 106 of 190 evaluable patients (56\%) achieved a molecular remission (MR) based on blood and/or bone marrow (BM) analysis. MR resulted in a significantly improved response duration (RD; 87\% vs 61\% patients in remission at 2 years, P = .004) and emerged to be an independent prognostic factor for RD (hazard ratio = 0.4, 95\% confidence interval, 0.1-0.9, P = .028). MR was highly predictive for prolonged RD independent of clinical response (complete response [CR], complete response unconfirmed [CRu], partial response [PR]; RD at 2 years: 94\% in BM MRD-negative CR/CRu and 100\% in BM MRD-negative PR, compared with 71\% in BM MRD-positive CR/CRu and 51\% in BM MRD-positive PR, P = .002). Sustained MR during the postinduction period was predictive for outcome in MCL Younger after autologous stem cell transplantation (ASCT; RD at 2 years 100\% vs 65\%, P = .001) and during maintenance in MCL Elderly (RD at 2 years: 76\% vs 36\%, P = .015). ASCT increased the proportion of patients in MR from 55\% before high-dose therapy to 72\% thereafter. Sequential MRD monitoring is a powerful predictor for treatment outcome in MCL. These trials are registered at www.clinicaltrials.gov as \#NCT00209222 and \#NCT00209209.},
author = {Pott, Christiane and Hoster, Eva and Delfau-Larue, Marie-Helene and Beldjord, Kheira and B{\"o}ttcher, Sebastian and Asnafi, Vahid and Plonquet, Anne and Siebert, Reiner and Callet-Bauchu, Evelyne and Andersen, Niels and van Dongen, Jacques J M and Klapper, Wolfram and Berger, Fran{\c{c}}oise and Ribrag, Vincent and van Hoof, Achiel L and Trneny, Marek and Walewski, Jan and Dreger, Peter and Unterhalt, Michael and Hiddemann, Wolfgang and Kneba, Michael and Kluin-Nelemans, Hanneke C and Hermine, Olivier and Macintyre, Elizabeth and Dreyling, Martin},
doi = {10.1182/blood-2009-06-230250},
issn = {1528-0020},
journal = {Blood},
keywords = {80 and over,Adult,Aged,Antineoplastic Combined Chemotherapy Protocols,Antineoplastic Combined Chemotherapy Protocols: th,Cell Separation,Combined Modality Therapy,Female,Flow Cytometry,Humans,Immunohistochemistry,Immunotherapy,Immunotherapy: methods,Lymphoma,Male,Mantle-Cell,Mantle-Cell: pathology,Mantle-Cell: therapy,Middle Aged,Neoplasm,Neoplasm Staging,Polymerase Chain Reaction,Prognosis,Radiotherapy,Residual,Treatment Outcome},
month = apr,
number = {16},
pages = {3215--3223},
pmid = {20032498},
title = {{Molecular remission is an independent predictor of clinical outcome in patients with mantle cell lymphoma after combined immunochemotherapy: a European MCL intergroup study}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2930903\&tool=pmcentrez\&rendertype=abstract},
volume = {115},
year = {2010}
}
@inproceedings{Potzsch2006,
abstract = {Analysis of the spatial structure of proteins including folding processes is a challenge for modem bioinformatics. Due to limited experimental access to folding processes, computer simulations are a standard approach. Since realistic continuous (all-atom) simulations are far too expensive, lattice based protein folding simulations are a common coarse-graining. In this paper we present a visualization toolfor lattice based protein folding simulations. The system is based on Shneiderman's mantra "Overview first, zoom and filter details on demand" and uses a collection of information visualization techniques including multiple views, focus+context and table lenses which have been tailored towards our data. We demonstrate the potential of information visualization techniques for providing insight into such simulations.},
author = {Potzsch, S and Scheuermann, Gerik and Wolfinger, Michael T and Flamm, Christoph and Stadler, Peter F},
booktitle = {Tenth International Conference on Information Visualisation (IV'06)},
doi = {10.1109/IV.2006.127},
editor = {Banissi, E and Burkhard, R A and Ursyn, A and Zhang, J J and Bannatyne, M W M and Maple, C and Cowell, A J and Tianm, G Y and Hou, M},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Potzsch et al. - 2006 - Visualization of Lattice-Based Protein Folding Simulations.pdf:pdf},
isbn = {0769526020},
keywords = {context,design guidelines,detail,focus,information visualization,multiple views},
pages = {2--7},
publisher = {IEEE},
title = {{Visualization of Lattice-Based Protein Folding Simulations}},
url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=1648246\&isnumber=34559},
year = {2006}
}
@mastersthesis{Prokop1999,
abstract = {This thesis presents “cache-oblivious” algorithms that use asymptotically optimal amounts of work, and move data asymptotically optimally among multiple levels of cache. An algorithm is cache oblivious if no program variables dependent on hardware configuration parameters, such as cache size and cache-line length need to be tuned to minimize the number of cache misses. We show that the ordinary algorithms for matrix transposition, matrix multiplication, sorting, and Jacobi-style multipass filtering are not cache optimal. We present algorithms for rectangular matrix transposition, FFT, sorting, and multipass filters, which are asymptotically optimal on computers with multiple levels of caches. For a cache with size Z and cache-line length L, where Z = (L2), the number of cache misses for an m n matrix transpose is (1 +mnL). The number of cache misses for either an n-point FFT or the sorting of n numbers is (1 +(n=L)(1 +logZn). The cache complexity of computing n time steps of a Jacobi-style multipass filter on an array of size n is (1 +n=L +n2=ZL). We also give an (mnp)-work algorithm to multiply an m n matrix by an n p matrix that incurs (m +n +p +(mn +np +mp)=L +mnp=L p Z)cache misses. We introduce an “ideal-cache” model to analyze our algorithms, and we prove that an optimal cache-oblivious algorithm designed for two levels of memory is also optimal formultiple levels. We further prove that any optimal cache-oblivious algorithm is also optimal in the previously studied HMM and SUMH models. Algorithms developed for these earlier models are perforce cache-aware: their behavior varies as a function of hardware-dependent parameters which must be tuned to attain optimality. Our cache-oblivious algorithms achieve the same asymptotic optimality on all these models, but without any tuning.},
author = {Prokop, Harald},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Prokop - 1999 - Cache-Oblivious Algorithms.pdf:pdf},
pages = {70},
school = {Massachusetts Institute of Technology},
title = {{Cache-Oblivious Algorithms}},
year = {1999}
}
@inproceedings{Qin2009,
author = {Qin, Guimin and Gao, Lin},
booktitle = {2009 Fourth International Conference on Bio-Inspired Computing (BIC-TA)},
doi = {10.1109/BICTA.2009.5338129},
isbn = {9781424438662},
keywords = {printed},
mendeley-tags = {printed},
pages = {1--8},
publisher = {IEEE},
title = {{Spectral clustering for detecting protein complexes in {PPI} networks}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5338129},
year = {2009}
}
@article{Qiu2010,
abstract = {Clouds and MapReduce have shown themselves to be a broadly useful approach to scientific computing especially for parallel data intensive applications. However they have limited applicability to some areas such as data mining because MapReduce has poor performance on problems with an iterative structure present in the linear algebra that underlies much data analysis. Such problems can be run efficiently on clusters using MPI leading to a hybrid cloud and cluster environment. This motivates the design and implementation of an open source Iterative MapReduce system Twister.},
author = {Qiu, Judy and Ekanayake, Jaliya and Gunarathne, Thilina and Choi, Jong Youl and Bae, Seung-Hee and Li, Hui and Zhang, Bingjing and Wu, Tak-Lon and Ruan, Yang and Ekanayake, Saliya and Hughes, Adam and Fox, Geoffrey},
doi = {10.1186/1471-2105-11-S12-S3},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Qiu et al. - 2010 - Hybrid cloud and cluster computing paradigms for life science applications.pdf:pdf},
issn = {1471-2105},
journal = {BMC Bioinformatics},
keywords = {Biological Science Disciplines,Cluster Analysis,Computational Biology,Computational Biology: methods,Data Mining,Metagenomics,Software,printed},
mendeley-tags = {printed},
number = {Suppl 12},
pages = {S3},
pmid = {21210982},
publisher = {BioMed Central},
title = {{Hybrid cloud and cluster computing paradigms for life science applications}},
url = {http://www.springerlink.com/content/y21332475k933684/},
volume = {11},
year = {2010}
}
@book{Rajaraman2011,
abstract = {At the highest level of description, this book is about data mining. However, it focuses on data mining of very large amounts of data, that is, data so large it does not fit in main memory. Because of the emphasis on size, many of our examples are about the Web or data derived from the Web. Further, the book takes an algorithmic point of view: data mining is about applying algorithms to data, rather than using data to train a machine-learning engine of some sort.},
author = {Rajaraman, Anand and Ullman, Jeffrey D.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Rajaraman, Ullman - 2011 - Mining of Massive Datasets.pdf:pdf},
internal-note = {Originally imported as an article with bogus journal/issn/volume/number fields and Stanford TAs listed as editors; cleaned to a book record -- verify edition/publisher against the copy actually cited},
pages = {328},
publisher = {Cambridge University Press},
title = {{Mining of Massive Datasets}},
url = {http://infolab.stanford.edu/~ullman/mmds.html},
year = {2011}
}
@article{Rees1991,
  author    = {Rees, Stephen A and Black, James P},
  title     = {{An experimental investigation of distributed matrix multiplication techniques}},
  journal   = {Software: Practice and Experience},
  volume    = {21},
  number    = {10},
  pages     = {1041--1063},
  month     = oct,
  year      = {1991},
  publisher = {John Wiley \& Sons, Ltd.},
  doi       = {10.1002/spe.4380211005},
  issn      = {00380644},
  url       = {http://doi.wiley.com/10.1002/spe.4380211005},
  keywords  = {distributed matrix multiplication,loosely coupled},
  file      = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Rees, Black - 1991 - An experimental investigation of distributed matrix multiplication techniques.pdf:pdf},
}
@incollection{Rice1992,
author = {Rice, Peter M. and Elliston, Keith and Gribskov, Michael},
booktitle = {Sequence Analysis Primer},
chapter = {1},
editor = {Gribskov, Michael and Devereux, John},
isbn = {9780195098747},
language = {English},
pages = {1--60},
publisher = {Oxford University Press},
title = {{DNA}},
year = {1992}
}
@article{Robic2010,
abstract = {To fully understand the roles proteins play in cellular processes, students need to grasp complex ideas about protein structure, folding, and stability. Our current understanding of these topics is based on mathematical models and experimental data. However, protein structure, folding, and stability are often introduced as descriptive, qualitative phenomena in undergraduate classes. In the process of learning about these topics, students often form incorrect ideas. For example, by learning about protein folding in the context of protein synthesis, students may come to an incorrect conclusion that once synthesized on the ribosome, a protein spends its entire cellular life time in its fully folded native confirmation. This is clearly not true; proteins are dynamic structures that undergo both local fluctuations and global unfolding events. To prevent and address such misconceptions, basic concepts of protein science can be introduced in the context of simple mathematical models and hands-on explorations of publicly available data sets. Ten common misconceptions about proteins are presented, along with suggestions for using equations, models, sequence, structure, and thermodynamic data to help students gain a deeper understanding of basic concepts relating to protein structure, folding, and stability.},
author = {Robic, Srebrenka},
editor = {Jungck, John},
institution = {Agnes Scott College, Decatur, GA 30030, USA. srobic@agnesscott.edu},
journal = {{CBE} Life Sciences Education},
number = {3},
pages = {189--195},
publisher = {American Society for Cell Biology},
title = {{Mathematics, thermodynamics, and modeling to address ten common misconceptions about protein structure, folding, and stability}},
url = {http://www.lifescied.org/cgi/content/abstract/9/3/189},
volume = {9},
year = {2010}
}
@article{Robinson2005,
abstract = {Ever since the dawn of the computer age, researchers have been trying to find a way of multiplying matrices, a fundamental operation that is a bottleneck for many important . Faster matrix multiplication would give more efficient for many},
author = {Robinson, Sara},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Robinson - 2005 - Toward an Optimal Algorithm for Matrix Multiplication.pdf:pdf},
journal = {{SIAM} News},
number = {9},
pages = {1--3},
title = {{Toward an Optimal Algorithm for Matrix Multiplication}},
url = {http://www.siam.org/pdf/news/174.pdf},
volume = {38},
year = {2005}
}
@inproceedings{Sack2006,
address = {Berlin, Heidelberg},
author = {Sack, Paul and Elster, Anne C.},
booktitle = {Applied Parallel Computing: Advanced Scientific Computing (PARA 2002)},
doi = {10.1007/3-540-48051-X},
editor = {Fagerholm, Juha and Haataja, Juha and J{\"a}rvinen, Jari and Lyly, Mikko and R{\aa}back, Peter and Savolainen, Ville},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Sack, Elster - 2006 - Fast MPI Broadcasts through Reliable Multicasting.pdf:pdf},
internal-note = {Imported as a 2006 journal article; LNCS 2367 is the PARA 2002 proceedings published in 2002 -- verify year and exact page range before citing},
isbn = {978-3-540-43786-4},
pages = {445--453},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Fast {MPI} Broadcasts through Reliable Multicasting}},
url = {http://www.springerlink.com/content/6lpbml2ku5cfe75g/},
volume = {2367},
year = {2002}
}
@book{Samet1989,
  author    = {Samet, Hanan},
  title     = {{The Design and Analysis of Spatial Data Structures}},
  publisher = {Addison-Wesley},
  year      = {1989},
  pages     = {510},
  isbn      = {978-0201502558},
  keywords  = {Algorithms,Data Structures},
  url       = {http://goo.gl/KJwT9},
  file      = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Samet - 1989 - The Design and Analysis of Spatial Data Structures.pdf:pdf},
}
@article{SamuelLattimore2005a,
abstract = {Accurately and reliably identifying the actual number of clusters present with a dataset of gene expression profiles, when no additional information on cluster structure is available, is a problem addressed by few algorithms. GeneMCL transforms microarray analysis data into a graph consisting of nodes connected by edges, where the nodes represent genes, and the edges represent the similarity in expression of those genes, as given by a proximity measurement. This measurement is taken to be the Pearson correlation coefficient combined with a local non-linear rescaling step. The resulting graph is input to the Markov Cluster (MCL) algorithm, which is an elegant, deterministic, non-specific and scalable method, which models stochastic flow through the graph. The algorithm is inherently affected by any cluster structure present, and rapidly decomposes a graph into cohesive clusters. The potential of the GeneMCL algorithm is demonstrated with a 5,730 gene subset (IGS) of the Van't Veer breast cancer database, for which the clusterings are shown to reflect underlying biological mechanisms.},
author = {{Samuel Lattimore}, B and van Dongen, Stijn Marinus and Crabbe, M James C},
doi = {10.1016/j.compbiolchem.2005.07.002},
issn = {1476-9271},
journal = {Computational Biology and Chemistry},
keywords = {Algorithms,Breast Neoplasms,Breast Neoplasms: chemistry,Breast Neoplasms: genetics,Cluster Analysis,Computational Biology,Databases,Female,Gene Expression Profiling,Gene Expression Profiling: methods,Genetic,Humans,Markov Chains,Microarray Analysis,Microarray Analysis: methods,Microarray Analysis: statistics \& numerical data,Multigene Family,printed},
mendeley-tags = {printed},
month = oct,
number = {5},
pages = {354--359},
pmid = {16172020},
title = {{{GeneMCL} in microarray analysis}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/16172020},
volume = {29},
year = {2005}
}
@article{R.2011,
  author  = {Sarala, R. and Prakruthi, V. and {Prathibha Annapurna}, P. and Saranya, S.},
  title   = {{Unification of Subspace Clustering and Outliers Detection On High Dimensional Data}},
  journal = {International Journal of Computer Technology and Applications},
  volume  = {02},
  number  = {04},
  pages   = {784--789},
  year    = {2011},
  issn    = {22296093},
  url     = {http://www.doaj.org/doaj?func=abstract\&recNo=7\&id=782830\&q1=-p\&f1=all\&b1=and\&q2=\&f2=all\&uiLanguage=en},
  file    = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Sarala et al. - 2011 - Unification of Subspace Clustering and Outliers Detection On High Dimensional Data.pdf:pdf},
}
@inproceedings{Sarlos2006,
abstract = {Several results appeared that show significant reduction in time for matrix multiplication, singular value decomposition as well as linear (lscr2) regression, all based on data dependent random sampling. Our key idea is that low dimensional embeddings can be used to eliminate data dependence and provide more versatile, linear time pass efficient matrix computation. Our main contribution is summarized as follows. 1) Independent of the results of Har-Peled and of Deshpande and Vempala, one of the first - and to the best of our knowledge the most efficient - relative error (1 + epsi) parA \$AkparF approximation algorithms for the singular value decomposition of an m times n matrix A with M non-zero entries that requires 2 passes over the data and runs in time O((M(k/epsi+k log k) + (n+m)(k/epsi+k log k)2)log (1/sigma)). 2) The first o(nd2) time (1 + epsi) relative error approximation algorithm for n times d linear (lscr2) regression. 3) A matrix multiplication and norm approximation algorithm that easily applies to implicitly given matrices and can be used as a black box probability boosting tool},
author = {Sarl{\'o}s, Tam{\'a}s},
booktitle = {2006 47th Annual IEEE Symposium on Foundations of Computer Science (FOCS'06)},
doi = {10.1109/FOCS.2006.37},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Sarlos - 2006 - Improved Approximation Algorithms for Large Matrices via Random Projections.pdf:pdf},
isbn = {0-7695-2720-5},
keywords = {printed},
mendeley-tags = {printed},
pages = {143--152},
publisher = {IEEE},
title = {{Improved Approximation Algorithms for Large Matrices via Random Projections}},
url = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=4031351},
year = {2006}
}
@inproceedings{Satuluri2010,
address = {New York, New York, USA},
author = {Satuluri, Venu and Parthasarathy, Srinivasan and Ucar, Duygu},
booktitle = {Proceedings of the 1st ACM International Conference on Bioinformatics and Computational Biology - BCB '10},
doi = {10.1145/1854776.1854812},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Satuluri, Parthasarathy, Ucar - 2010 - Markov clustering of protein interaction networks with improved balance and scalability.pdf:pdf},
internal-note = {Imported with only the start page (247); full range per the ACM DL is 247--254 -- verify},
isbn = {9781450304382},
keywords = {printed},
mendeley-tags = {printed},
pages = {247--254},
publisher = {ACM Press},
title = {{Markov clustering of protein interaction networks with improved balance and scalability}},
url = {http://portal.acm.org/citation.cfm?doid=1854776.1854812},
year = {2010}
}
@article{SCHAEFFER2007,
abstract = {In this survey we overview the definitions and methods for graph clustering, that is, finding sets of related vertices in graphs. We review the many definitions for what is a cluster in a graph and measures of cluster quality. Then we present global algorithms for producing a clustering for the entire vertex set of an input graph, after which we discuss the task of identifying a cluster for a specific seed vertex by local computation. Some ideas on the application areas of graph clustering algorithms are given. We also address the problematics of evaluating clusterings and benchmarking cluster algorithms.},
author = {Schaeffer, Satu Elisa},
doi = {10.1016/j.cosrev.2007.05.001},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Schaeffer - 2007 - Graph clustering.pdf:pdf},
issn = {15740137},
journal = {Computer Science Review},
keywords = {printed},
mendeley-tags = {printed},
month = aug,
number = {1},
pages = {27--64},
publisher = {Elsevier},
title = {{Graph clustering}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1574013707000020},
volume = {1},
year = {2007}
}
@misc{Scribe2009,
author = {Viola, Emanuele},
howpublished = {Lecture notes},
internal-note = {Rewritten from a corrupted import whose author list contained scraping artifacts (``Scribe'', ``The, B'', ``Let, C'') and whose journal was ``Transform''. Appears to be scribe notes, taken by Emanuele Viola, of a lecture by Ravi Sundaram covering the group-theoretic matrix multiplication approach of Cohn, Kleinberg, Szegedy, and Umans -- verify author and venue before citing},
pages = {1--7},
title = {{Matrix Multiplication: A Group-Theoretic Approach}},
year = {2009}
}
@inproceedings{Seo2010,
abstract = {Various scientific computations have become so complex, and thus computation tools play an important role. In this paper, we explore the state-of-the-art framework providing high-level matrix computation primitives with MapReduce through the case study approach, and demonstrate these primitives with different computation engines to show the performance and scalability. We believe the opportunity for using MapReduce in scientific computation is even more promising than the success to date in the parallel systems literature.},
author = {Seo, Sangwon and Kim, Jin-soo},
booktitle = {2010 IEEE Second International Conference on Cloud Computing Technology and Science (CloudCom)},
doi = {10.1109/CloudCom.2010.17},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Seo, Kim - 2010 - HAMA An Efficient Matrix Computation with the MapReduce Framework.pdf:pdf},
isbn = {9781424494057},
keywords = {printed},
mendeley-tags = {printed},
pages = {721--726},
publisher = {IEEE},
title = {{HAMA}: An Efficient Matrix Computation with the {MapReduce} Framework},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5708522},
year = {2010}
}
@book{Shawe-Taylor2004,
abstract = {Kernel methods provide a powerful and unified framework for pattern discovery, motivating algorithms that can act on general types of data (e.g. strings, vectors or text) and look for general types of relations (e.g. rankings, classifications, regressions, clusters). The application areas range from neural networks and pattern recognition to machine learning and data mining. This book, developed from lectures and tutorials, fulfils two major roles: firstly it provides practitioners with a large toolkit of algorithms, kernels and solutions ready to use for standard pattern discovery problems in fields such as bioinformatics, text analysis, image analysis. Secondly it provides an easy introduction for students and researchers to the growing field of kernel-based pattern analysis, demonstrating with examples how to handcraft an algorithm or a kernel for a new specific application, and covering all the necessary conceptual and mathematical tools to do so.},
author = {Shawe-Taylor, John and Cristianini, Nello},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Shawe-Taylor, Cristianini - 2004 - Kernel Methods for Pattern Analysis.pdf:pdf},
isbn = {0521813972},
pages = {462},
publisher = {Cambridge University Press},
title = {{Kernel Methods for Pattern Analysis}},
url = {http://books.google.com/books?id=9i0vg12lti4C\&pgis=1},
year = {2004}
}
@inproceedings{Shinnou2007,
author = {Shinnou, Hiroyuki and Sasaki, Minoru},
booktitle = {Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC'08)},
editor = {Calzolari, Nicoletta and Choukri, Khalid and Maegaard, Bente and Mariani, Joseph and Odjik, Jan and Piperidis, Stelios and Tapias, Daniel},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Shinnou, Sasaki - 2007 - Refinement of Document Clustering by Using NMF.pdf:pdf},
internal-note = {Imported with journal ``Matrix'' and year 2007; the URL and editor list point to the LREC 2008 proceedings -- verify venue and year before citing},
isbn = {2951740840},
pages = {4--12},
publisher = {European Language Resources Association (ELRA)},
title = {{Refinement of Document Clustering by Using {NMF}}},
url = {http://www.lrec-conf.org/proceedings/lrec2008/},
year = {2008}
}
@inproceedings{Siegel2010,
  author    = {Siegel, Jakob and Villa, Oreste and Krishnamoorthy, Sriram and Tumeo, Antonino and Li, Xiaoming},
  title     = {{Efficient sparse matrix-matrix multiplication on heterogeneous high performance systems}},
  booktitle = {2010 IEEE International Conference On Cluster Computing Workshops and Posters (CLUSTER WORKSHOPS)},
  pages     = {1--8},
  month     = sep,
  year      = {2010},
  publisher = {IEEE},
  isbn      = {978-1-4244-8395-2},
  doi       = {10.1109/CLUSTERWKSP.2010.5613109},
  url       = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=5613109},
  keywords  = {printed},
  mendeley-tags = {printed},
  abstract  = {The efficient implementation of sparse matrix-matrix multiplications on high performance parallel machines poses several challenges: large size of input matrices, compressed representation, density of the output matrices, partitioning and load balancing of matrices that present parts with large differences in density and, thus, in computation times. In this paper we show how, starting from the requirements of such application, we developed a framework that allows its efficient implementation on heterogeneous clusters. We introduce a task based programming model and a runtime supported execution model which provides dynamic load balancing on clusters composed by CPUs and GPUs, allowing better utilization of the system while easing the handling of sparse matrices. The results show that our solution, which co-designs the application together with the programming model and the runtime system, is able to obtain significant speedups due to a more effective load balancing with respect to other programming approaches.},
  file      = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Siegel et al. - 2010 - Efficient sparse matrix-matrix multiplication on heterogeneous high performance systems.pdf:pdf},
}
@article{Sindhwani2011,
  author   = {Sindhwani, V. and Ghoting, A. and Ting, E. and Lawrence, R.},
  title    = {{Extracting insights from social media with large-scale matrix approximations}},
  journal  = {IBM Journal of Research and Development},
  volume   = {55},
  number   = {5},
  year     = {2011},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-81555199208\&partnerID=40\&md5=c797e927351fae86a55e93cbe231ddc8},
  abstract = {Social media platforms such as blogs, Twitter accounts, and online discussion sites are large-scale forums where every individual can potentially voice an influential public opinion. According to recent surveys, a massive number of Internet users are turning to such forums to collect recommendations and reviews for products and services, and to shape their individual choices and stances by the commentary of the online community as a whole. The unsupervised extraction of insight from unstructured user-generated web content requires new methodologies that are likely to be rooted in natural language processing and machine-learning techniques. Furthermore, the unprecedented scale of data begging to be analyzed necessitates the implementation of these methodologies on modern distributed computing platforms. In this paper, we describe a flexible new family of low-rank matrix approximation algorithms for modeling topics in a given corpus of documents (e.g., blog posts and tweets). We benchmark distributed optimization algorithms for running these models in a Hadoop-enabled cluster environment. We describe online learning strategies for tracking the evolution of ongoing topics and rapidly detecting the emergence of new themes in a streaming setting. 2011 IBM.},
}
@book{Skiena2008,
  author    = {Skiena, Steven S.},
  title     = {{The Algorithm Design Manual}},
  publisher = {Springer London},
  address   = {London},
  year      = {2008},
  isbn      = {978-1-84800-069-8},
  doi       = {10.1007/978-1-84800-070-4},
  url       = {http://www.springerlink.com/index/10.1007/978-1-84800-070-4},
  abstract  = {This expanded and updated second edition of a classic bestseller continues to take the "mystery" out of designing and analyzing algorithms and their efficacy and efficiency. Expanding on the highly successful formula of the first edition, the book now serves as the primary textbook of choice for any algorithm design course while maintaining its status as the premier practical reference guide to algorithms. NEW: (1) Incorporates twice the tutorial material and exercises. (2) Provides full online support for lecturers, and a completely updated and improved website component with lecture slides, audio and video. (3) Contains a highly unique catalog of the 75 most important algorithmic problems. (4) Includes new "war stories" and "interview problems", relating experiences from real-world applications. Written by a well-known, IEEE Computer Science teaching-award winner, this new edition is an essential learning tool for students needing a solid grounding in algorithms, as well as a uniquely comprehensive text/reference for professionals.},
  file      = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Skiena - 2008 - The Algorithm Design Manual.pdf:pdf},
}
@inproceedings{Slonim2000,
author = {Slonim, Noam and Tishby, Naftali},
booktitle = {Proceedings of the 23rd Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '00)},
doi = {10.1145/345508.345578},
editor = {Yannakoudakis, Emmanuel and Belkin, Nicholas J and Leong, Mun-Kew and Ingwersen, Peter},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Slonim, Tishby - 2000 - Document clustering using word clusters via the information bottleneck method.pdf:pdf},
isbn = {1581132263},
pages = {208--215},
publisher = {ACM Press},
title = {{Document clustering using word clusters via the information bottleneck method}},
url = {http://portal.acm.org/citation.cfm?doid=345508.345578},
year = {2000}
}
@inproceedings{Solomonik2011,
address = {Berlin, Heidelberg},
author = {Solomonik, Edgar and Demmel, James},
booktitle = {Euro-Par 2011 Parallel Processing},
doi = {10.1007/978-3-642-23397-5_10},
editor = {Jeannot, Emmanuel and Namyst, Raymond and Roman, Jean},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Solomonik, Demmel - 2011 - Euro-Par 2011 Parallel Processing.pdf:pdf},
internal-note = {Originally imported as the entire proceedings volume (@book titled ``Euro-Par 2011 Parallel Processing'') under the paper authors' names; rewritten as the contained paper. Also circulated as EECS Technical Report EECS-2011-10, UC Berkeley. Verify paper title, pages, and chapter DOI before citing},
isbn = {978-3-642-23396-8},
pages = {90--109},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Communication-Optimal Parallel 2.5D Matrix Multiplication and LU Factorization Algorithms}},
volume = {6853},
year = {2011}
}
@inproceedings{Sra2008,
author = {Sra, Suvrit},
title = {{Block-Iterative Algorithms for Non-negative Matrix Approximation}},
booktitle = {2008 8th IEEE International Conference on Data Mining},
publisher = {IEEE},
month = dec,
year = {2008},
pages = {1037--1042},
doi = {10.1109/ICDM.2008.77},
isbn = {978-0-7695-3502-9},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4781221},
abstract = {In this paper we present new algorithms for non-negative matrix approximation (NMA), commonly known as the NMF problem. Our methods improve upon the well-known methods of Lee \& Seung [12] for both the Frobenius norm as well the Kullback-Leibler divergence versions of the problem. For the latter problem, our results are especially interesting because it seems to have witnessed much lesser algorithmic progress as compared to the Frobenius norm NMA problem. Our algorithms are based on a particular block-iterative acceleration technique for EM, which preserves the multiplicative nature of the updates and also ensures monotonicity. Furthermore, our algorithms also naturally apply to the Bregman-divergence NMA algorithms of [6]. Experimentally,we show that our algorithms outperform the traditional Lee/Seung approach most of the time.},
keywords = {printed},
mendeley-tags = {printed},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Sra - 2008 - Block-Iterative Algorithms for Non-negative Matrix Approximation.pdf:pdf}
}
@techreport{Sra2006,
abstract = {Low dimensional data representations are crucial to numerous applications in machine learning, statistics, and signal processing. Nonnegative matrix approximation (NNMA) is a method for dimensionality reduction that respects the nonnegativity of the input data while constructing a low-dimensional approximation. NNMA has been used in a multitude of applications, though without commensurate theoretical development. In this report we describe generic methods for minimizing generalized divergences between the input and its low-rank approximant. Some of our general methods are even extensible to arbitrary convex penalties. Our methods yield efficient multiplicative iterative schemes for solving the proposed problems. We also consider interesting extensions such as the use of penalty functions, non-linear relationships via link functions, weighted errors, and multi-factor approximations. We present some experiments as an illustration of our algorithms. For completeness, the report also includes a brief literature survey of the various algorithms and the applications of NNMA.},
author = {Sra, Suvrit and Dhillon, Inderjit S},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Sra, Dhillon - 2006 - Nonnegative Matrix Approximation Algorithms and Applications.pdf:pdf},
institution = {University of Texas at Austin},
number = {TR-06-27},
keywords = {bregman divergence,dimensionality reduction,link functions,multiplicative,nonnegative matrix factorization,printed,updates,weighted approximation},
mendeley-tags = {printed},
pages = {1--36},
title = {{Nonnegative Matrix Approximation: Algorithms and Applications}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.80.107\&rep=rep1\&type=pdf},
year = {2006}
}
@inproceedings{SRIHARI2009,
address = {London},
author = {Srihari, Sriganesh and Ning, Kang and Leong, Hon Wai},
booktitle = {Genome Informatics 2009 - Proceedings of the 20th International Conference},
doi = {10.1142/9781848165632_0015},
isbn = {9781848165625},
pages = {159--168},
publisher = {Imperial College Press},
series = {Genome Informatics Series},
title = {{Refining Markov Clustering for Protein Complex Prediction by Incorporating Core-Attachment Structure}},
url = {http://eproceedings.worldscinet.com/9781848165632/9781848165632\_0015.html},
year = {2009}
}
@article{Srihari2010,
abstract = {The reconstruction of protein complexes from the physical interactome of organisms serves as a building block towards understanding the higher level organization of the cell. Over the past few years, several independent high-throughput experiments have helped to catalogue enormous amount of physical protein interaction data from organisms such as yeast. However, these individual datasets show lack of correlation with each other and also contain substantial number of false positives (noise). Over these years, several affinity scoring schemes have also been devised to improve the qualities of these datasets. Therefore, the challenge now is to detect meaningful as well as novel complexes from protein interaction (PPI) networks derived by combining datasets from multiple sources and by making use of these affinity scoring schemes. In the attempt towards tackling this challenge, the Markov Clustering algorithm (MCL) has proved to be a popular and reasonably successful method, mainly due to its scalability, robustness, and ability to work on scored (weighted) networks. However, MCL produces many noisy clusters, which either do not match known complexes or have additional proteins that reduce the accuracies of correctly predicted complexes.},
author = {Srihari, Sriganesh and Ning, Kang and Leong, Hon Wai},
doi = {10.1186/1471-2105-11-504},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Srihari, Ning, Leong - 2010 - MCL-CAw a refinement of MCL for detecting yeast complexes from weighted PPI networks by incorporating core-attachment structure.pdf:pdf},
issn = {1471-2105},
journal = {BMC Bioinformatics},
keywords = {Cluster Analysis,Databases,Markov Chains,Protein,Protein Interaction Mapping,Protein Interaction Mapping: methods,Proteins,Proteins: chemistry,Proteins: metabolism,Saccharomyces cerevisiae,Saccharomyces cerevisiae Proteins,Saccharomyces cerevisiae Proteins: chemistry,Saccharomyces cerevisiae Proteins: metabolism,Saccharomyces cerevisiae: metabolism,Software,printed},
mendeley-tags = {printed},
month = jan,
number = {1},
pages = {504},
pmid = {20939868},
title = {{MCL-CAw: a refinement of MCL for detecting yeast complexes from weighted PPI networks by incorporating core-attachment structure.}},
url = {http://www.biomedcentral.com/1471-2105/11/504},
volume = {11},
year = {2010}
}
@incollection{States1992,
author = {States, David J. and Boguski, Mark S.},
title = {{Similarity and Homology}},
booktitle = {Sequence Analysis Primer},
editor = {Gribskov, Michael and Devereux, John},
chapter = {3},
pages = {89--157},
publisher = {Oxford University Press},
isbn = {9780195098747},
year = {1992}
}
@article{Stefan2003,
abstract = {We present an algorithm to reconstruct a collection of disjoint smooth closed curves from noisy samples. Our noise model assumes that the samples are obtained by first drawing points on the curves according to a locally uniform distribution followed by a uniform perturbation in the normal directions. Our reconstruction is faithful with probability approaching 1 as the sampling density increases.},
author = {Cheng, Siu-Wing and Funke, Stefan and Golin, Mordecai and Kumar, Piyush and Poon, Sheung-Hung and Ramos, Edgar},
journal = {Computational Geometry},
pages = {1--36},
title = {{Curve Reconstruction from Noisy Samples}},
year = {2003}
}
@article{Stefani2008,
abstract = {Protein folding, misfolding and aggregation, as well as the way misfolded and aggregated proteins affects cell viability are emerging as key themes in molecular and structural biology and in molecular medicine. Recent advances in the knowledge of the biophysical basis of protein folding have led to propose the energy landscape theory which provides a consistent framework to better understand how a protein folds rapidly and efficiently to the compact, biologically active structure. The increased knowledge on protein folding has highlighted its strict relation to protein misfolding and aggregation, either process being in close competition with the other, both relying on the same physicochemical basis. The theory has also provided information to better understand the structural and environmental factors affecting protein folding resulting in protein misfolding and aggregation into ordered or disordered polymeric assemblies. Among these, particular importance is given to the effects of surfaces. The latter, in some cases make possible rapid and efficient protein folding but most often recruit proteins/peptides increasing their local concentration thus favouring misfolding and accelerating the rate of nucleation. It is also emerging that surfaces can modify the path of protein misfolding and aggregation generating oligomers and polymers structurally different from those arising in the bulk solution and endowed with different physical properties and cytotoxicities.},
author = {Stefani, Massimo},
doi = {10.3390/ijms9122515},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Stefani - 2008 - Protein Folding and Misfolding on Surfaces.pdf:pdf},
institution = {Department of Biochemical Sciences and Research Centre on the Molecular Basis of Neurodegeneration, University of Florence, Italy. stefani@scibio.unifi.it},
issn = {1422-0067},
journal = {International Journal of Molecular Sciences},
keywords = {amyloid,amyloid cytotoxicity,amyloid fibrils,protein aggregation,protein folding,protein misfolding},
number = {12},
pages = {2515--2542},
publisher = {Molecular Diversity Preservation International (MDPI)},
title = {{Protein Folding and Misfolding on Surfaces}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/19330090},
volume = {9},
year = {2008}
}
@article{Stein1860,
author = {Stein, Benno and Busch, Michael},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Stein, Busch - 1860 - Density-based Cluster Algorithms in Low-dimensional and High-dimensional Applications.pdf:pdf},
journal = {Information Retrieval},
keywords = {cluster algorithm,cluster algorithms,cluster analysis,density based cluster analysis,document categorization,high dimensional data},
number = {Tir 05},
title = {{Density-based Cluster Algorithms in Low-dimensional and High-dimensional Applications}},
internal-note = {Year corrected from the impossible import value 1860; the paper appeared at the TIR-05 workshop (2005). Citation key kept unchanged to avoid breaking existing citations.},
year = {2005}
}
@article{Stojmirovic2007,
abstract = {Interaction networks, consisting of agents linked by their interactions, are ubiquitous across many disciplines of modern science. Many methods of analysis of interaction networks have been proposed, mainly concentrating on node degree distribution or aiming to discover clusters of agents that are very strongly connected between themselves. These methods are principally based on graph-theory or machine learning. We present a mathematically simple formalism for modelling context-specific information propagation in interaction networks based on random walks. The context is provided by selection of sources and destinations of information and by use of potential functions that direct the flow towards the destinations. We also use the concept of dissipation to model the aging of information as it diffuses from its source. Using examples from yeast protein-protein interaction networks and some of the histone acetyltransferases involved in control of transcription, we demonstrate the utility of the concepts and the mathematical constructs introduced in this paper.},
author = {Stojmirovi\'{c}, Aleksandar and Yu, Yi-Kuo},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Stojmirovi\'{c}, Yu - 2007 - Information flow in interaction networks.pdf:pdf},
institution = {National Center for Biotechnology Information, National Library of Medicine, National Institutes of Health, Bethesda, Maryland 20894, USA.},
journal = {Journal of Computational Biology},
keywords = {biological,computational biology,histone acetyltransferases,histone acetyltransferases metabolism,information theory,markov chains,mathematics,models,printed,protein interaction mapping,saccharomyces cerevisiae proteins,saccharomyces cerevisiae proteins metabolism,theoretical,transcription factors,transcription factors metabolism},
mendeley-tags = {printed},
number = {8},
pages = {1115--1143},
pmid = {17985991},
title = {{Information flow in interaction networks.}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/17985991},
volume = {14},
year = {2007}
}
@inproceedings{Sul2011,
author = {Sul, Seung-Jin and Tovchigrechko, Andrey},
booktitle = {2011 IEEE International Symposium on Parallel and Distributed Processing Workshops and PhD Forum},
doi = {10.1109/IPDPS.2011.180},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Sul, Tovchigrechko - 2011 - Parallelizing BLAST and SOM Algorithms with MapReduce-MPI Library.pdf:pdf},
isbn = {9781612844251},
keywords = {bioinformatics,high performance computing,map reduce,parallel algorithms},
pages = {481--489},
publisher = {IEEE},
title = {{Parallelizing BLAST and SOM Algorithms with MapReduce-MPI Library}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6008868},
year = {2011}
}
@inproceedings{Sulatycke2002,
abstract = {Several fast sequential algorithms have been proposed in the past to multiply sparse matrices. These algorithms do not explicitly address the impact of caching on performance. We show that a rather simple sequential cache-efficient algorithm provides significantly better performance than existing algorithms for sparse matrix multiplication. We then describe a multithreaded implementation of this simple algorithm and show that its performance scales well with the number of threads and CPUs. For 10\% sparse, 500 X 500 matrices, the multithreaded version running on 4-CPU systems provides more than a 41.1-fold speed increase over the well-known BLAS routine and a 14.6 fold and 44.6-fold speed increase over two other recent techniques for fast sparse matrix multiplication, both of which are relatively difficult to parallelize efficiently. Keywords: sparse matrix multiplication, caching, loop interchanging 1. Introduction The need to efficiently multiply two sparse matrices is critica...},
author = {Sulatycke, Peter D and Ghose, Kanad},
booktitle = {Proceedings of the 12th International Parallel Processing Symposium},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Sulatycke, Ghose - 2002 - Caching-efficient multithreaded fast multiplication of sparse matrices.pdf:pdf},
isbn = {0818684046},
keywords = {caching,loop,printed,sparse matrix multiplication},
mendeley-tags = {printed},
pages = {117--123},
publisher = {IEEE Computer Society},
title = {{Caching-efficient multithreaded fast multiplication of sparse matrices}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.35.9732},
internal-note = {Removed bogus doi field (10.1.1.35.9732 is a CiteSeerX document ID, not a DOI). Year corrected from 2002: the 12th IPPS named in booktitle took place in 1998. Citation key kept unchanged.},
year = {1998}
}
@article{Sun2011,
author = {Sun, Peng Gang},
doi = {10.7150/ijbs.7.61},
issn = {1449-2288},
journal = {International Journal of Biological Sciences},
number = {1},
pages = {61--73},
title = {{Prediction of Human Disease-Related Gene Clusters by Clustering Analysis}},
url = {http://www.biolsci.org/v07p0061.htm},
volume = {7},
year = {2011}
}
@inproceedings{Sun2009,
abstract = {Accumulating evidence suggests that biological systems are composed of interacting, separable, functional modules-groups of vertices within which connections are dense but between which they are sparse. Identifying these modules is essential to understand the organization of biological systems. However, the most existing deterministic algorithms only find ``dense'' clusters. Actually, the modules are of differing sizes, densities and shapes. In this paper, we take into account the property of diversity of module topological structure, propose an efficient algorithm relying on density and shared neighbors for detecting overlapping modules in PPI (protein-protein interaction) networks. Our algorithm first finds the skeleton of the modules, SNCS (Shared Neighbor Connected Subgraph), then constructs the modules by expanding the leaf vertices of SNCS based on shared neighbors. Furthermore, since the PPI network is noisy and still incomplete, some methods treat the PPI networks as weighted graphs in which each edge (e.g., interaction) is associated with a weight representing the probability or reliability of that interaction for preprocessing and purifying PPI data. Thus, we extend our method into weighted networks which takes into account the link weights in a more delicate way by incorporating the subgraph intensity. We test our method on PPI networks. Our analysis of the yeast PPI network suggests that most of these modules have well biological significance in the context of protein localization, function annotation.},
author = {Sun, Peng Gang and Gao, Lin},
booktitle = {2009 9th IEEE International Conference on Bioinformatics and BioEngineering},
doi = {10.1109/BIBE.2009.6},
isbn = {978-0-7695-3656-9},
month = jun,
pages = {228--235},
publisher = {IEEE},
title = {{Algorithms Based on Density and Shared Neighbors for Functional Modules Identification in PPI Networks}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5211274},
year = {2009}
}
@inproceedings{Sun2010,
author = {Sun, Yinghong and Tong, Yuanman},
title = {{CUDA Based Fast Implementation of Very Large Matrix Computation}},
booktitle = {2010 International Conference on Parallel and Distributed Computing, Applications and Technologies},
publisher = {IEEE},
month = dec,
year = {2010},
pages = {487--491},
doi = {10.1109/PDCAT.2010.45},
isbn = {978-1-4244-9110-0},
url = {http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=5704475},
abstract = {CUDA (Compute Unified Device Architecture) acceleration of very large scale matrix-vector and matrix-matrix multiplication is presented in this paper. The intrinsic parallelism in the matrix computations are exploited thoroughly. By dividing the entire matrix computation to multiple sub-groups, scalable performance improvement can be achieved using multiple GPUs. The key operations are accelerated by GPU. And the CUDA related data storage, threads hierarchy, and kernel implementation are proposed. Several optimization methods including coalesced global memory access, on-the-fly reduction, bank conflict free shared memory usage, loop unrolling, removing unnecessary synchronization, and concurrent execution on the device through streams are also employed. Experiment results show that about 8.5 times speedup can be achieved for CUDA accelerated matrix multiplication maximally.},
keywords = {printed},
mendeley-tags = {printed},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Sun, Tong - 2010 - CUDA Based Fast Implementation of Very Large Matrix Computation.pdf:pdf}
}
@book{Pang-NingTan2006,
author = {Tan, Pang-Ning and Steinbach, Michael and Kumar, Vipin},
address = {Boston},
isbn = {0-321-32136-7},
pages = {769},
publisher = {Addison-Wesley},
title = {{Introduction to Data Mining}},
year = {2006}
}
@article{Taylor2010,
abstract = {Bioinformatics researchers are now confronted with analysis of ultra large-scale data sets, a problem that will only increase at an alarming rate in coming years. Recent developments in open source software, that is, the Hadoop project and associated software, provide a foundation for scaling to petabyte scale data warehouses on Linux clusters, providing fault-tolerant parallelized analysis on such data using a programming style named MapReduce.},
author = {Taylor, Ronald C},
doi = {10.1186/1471-2105-11-S12-S1},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Taylor - 2010 - An overview of the HadoopMapReduceHBase framework and its current applications in bioinformatics.pdf:pdf},
issn = {1471-2105},
journal = {BMC Bioinformatics},
keywords = {Algorithms,Cluster Analysis,Computational Biology,Computational Biology: methods,High-Throughput Nucleotide Sequencing,Software},
month = jan,
number = {Suppl 12},
pages = {S1},
pmid = {21210976},
title = {{An overview of the Hadoop/MapReduce/HBase framework and its current applications in bioinformatics.}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=3040523\&tool=pmcentrez\&rendertype=abstract},
volume = {11},
year = {2010}
}
@book{Tel2000,
abstract = {The second edition of this successful textbook provides an up-to-date introduction both to distributed algorithms and to the theory behind them. The clear presentation makes the book suitable for advanced undergraduate or graduate courses, while the coverage is sufficiently deep to make it useful for practicing engineers and researchers. The author concentrates on algorithms for the point-to-point message passing model and includes algorithms for the implementation of computer communication networks. Two new chapters on sense of direction and failure detectors are state of the art and will provide an entry to research in these still-developing topics.},
address = {Cambridge},
author = {Tel, Gerard},
edition = {Second},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Tel - 2000 - Introduction to Distributed Algorithms.djvu:djvu},
isbn = {978-0521794831},
language = {English},
pages = {612},
publisher = {Cambridge University Press},
title = {{Introduction to Distributed Algorithms}},
url = {http://www.amazon.com/Introduction-Distributed-Algorithms-Gerard-Tel/dp/0521794838},
year = {2000}
}
@techreport{Goh2008,
author = {Teng, Goh Cheng},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Teng - 2008 - Matrix-Matrix Multiplication on GPU in Octave.pdf:pdf},
institution = {Advanced Computing Group, Institute of High Performance Computing, A*STAR},
pages = {1--10},
title = {{Matrix-Matrix Multiplication on GPU in Octave}},
url = {http://docs.ihpc.a-star.edu.sg/papers/MMGPUOctave.pdf},
year = {2008}
}
@article{Tetko2005a,
abstract = {Detection of sequence homologues represents a challenging task that is important for the discovery of protein families and the reliable application of automatic annotation methods. The presence of domains in protein families of diverse function, inhomogeneity and different sizes of protein families create considerable difficulties for the application of published clustering methods.},
author = {Tetko, Igor V and Facius, Axel and Ruepp, Andreas and Mewes, Hans-Werner},
doi = {10.1186/1471-2105-6-82},
issn = {1471-2105},
journal = {BMC Bioinformatics},
keywords = {Algorithms,Amino Acid,Bacillus subtilis,Bacillus subtilis: metabolism,Bacterial,Cluster Analysis,Computational Biology,Computational Biology: methods,Computer Graphics,Computer Simulation,Database Management Systems,Databases,Genes,Genetic,Genome,Helicobacter pylori,Helicobacter pylori: metabolism,Information Storage and Retrieval,Listeria,Listeria monocytogenes,Listeria monocytogenes: metabolism,Listeria: metabolism,Magnetics,Markov Chains,Monte Carlo Method,Nucleic Acid,Programming Languages,Protein,Protein Structure,Proteins,Proteins: chemistry,Sequence Alignment,Sequence Homology,Software,Tertiary,printed},
mendeley-tags = {printed},
month = jan,
pages = {82},
pmid = {15804359},
title = {{Super paramagnetic clustering of protein sequences.}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=1084344\&tool=pmcentrez\&rendertype=abstract},
volume = {6},
year = {2005}
}
@article{Thachuk2007,
abstract = {Background: The ab initio protein folding problem consists of predicting protein tertiary structure from a given amino acid sequence by minimizing an energy function; it is one of the most important and challenging problems in biochemistry, molecular biology and biophysics. The ab initio protein folding problem is computationally challenging and has been shown to be NP-hard even when conformations are restricted to a lattice. In this work, we implement and evaluate the replica exchange Monte Carlo (REMC) method, which has already been applied very successfully to more complex protein models and other optimization problems with complex energy landscapes, in combination with the highly effective pull move neighbourhood in two widely studied Hydrophobic Polar (HP) lattice models. Results: We demonstrate that REMC is highly effective for solving instances of the square (2D) and cubic (3D) HP protein folding problem. When using the pull move neighbourhood, REMC outperforms current state-of-the-art algorithms for most benchmark instances. Additionally, we show that this new algorithm provides a larger ensemble of ground-state structures than the existing state-of-the-art methods. Furthermore, it scales well with sequence length, and it finds significantly better conformations on long biological sequences and sequences with a provably unique ground-state structure, which is believed to be a characteristic of real proteins. We also present evidence that our REMC algorithm can fold sequences which exhibit significant interaction between termini in the hydrophobic core relatively easily. Conclusion: We demonstrate that REMC utilizing the pull move neighbourhood significantly outperforms current state-of-the-art methods for protein structure prediction in the HP model on 2D and 3D lattices. This is particularly noteworthy, since so far, the state-of-the-art methods for 2D and 3D HP protein folding in particular, the pruned-enriched Rosenbluth method (PERM) and, to some extent, Ant Colony Optimisation (ACO) were based on chain growth mechanisms. To the best of our knowledge, this is the first application of REMC to HP protein folding on the cubic lattice, and the first extension of the pull move neighbourhood to a 3D lattice.},
author = {Thachuk, Chris and Shmygelska, Alena and Hoos, Holger H},
doi = {10.1186/1471-2105-8-342},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Thachuk, Shmygelska, Hoos - 2007 - A replica exchange Monte Carlo algorithm for protein folding in the HP model.pdf:pdf},
institution = {Department of Computer Science, University of British Columbia, BC, V6T 1Z4, Canada. cthachuk@sfu.ca},
journal = {BMC Bioinformatics},
keywords = {not printed},
mendeley-tags = {not printed},
number = {1},
pages = {342},
publisher = {BioMed Central},
title = {{A replica exchange Monte Carlo algorithm for protein folding in the HP model}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/17875212},
volume = {8},
year = {2007}
}
@article{Theodosiou2008,
abstract = {Biomedical literature is the principal repository of biomedical knowledge, with PubMed being the most complete database collecting, organizing and analyzing such textual knowledge. There are numerous efforts that attempt to exploit this information by using text mining and machine learning techniques. We developed a novel approach, called PuReD-MCL (Pubmed Related Documents-MCL), which is based on the graph clustering algorithm MCL and relevant resources from PubMed.},
author = {Theodosiou, Theodosios and Darzentas, Nikos and Angelis, Lefteris and Ouzounis, Christos A.},
doi = {10.1093/bioinformatics/btn318},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Theodosiou et al. - 2008 - PuReD-MCL a graph-based PubMed document clustering methodology.pdf:pdf},
issn = {1367-4811},
journal = {Bioinformatics (Oxford, England)},
keywords = {Algorithms,Artificial Intelligence,Automated,Cluster Analysis,Database Management Systems,Information Storage and Retrieval,Information Storage and Retrieval: methods,Natural Language Processing,Pattern Recognition,PubMed,Software,automated methods},
month = sep,
number = {17},
pages = {1935--1941},
pmid = {18593717},
title = {{PuReD-MCL: a graph-based PubMed document clustering methodology.}},
url = {http://bioinformatics.oxfordjournals.org/cgi/content/abstract/24/17/1935},
volume = {24},
year = {2008}
}
@inproceedings{Thottethodi1998,
author = {Thottethodi, Mithuna and Chatterjee, Siddhartha and Lebeck, Alvin R.},
booktitle = {Proceedings of the 1998 ACM/IEEE Conference on Supercomputing (SC '98)},
doi = {10.1109/SC.1998.10045},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Thottethodi, Chatterjee, Lebeck - 1998 - Tuning Strassen's matrix multiplication for memory efficiency.pdf:pdf},
isbn = {0-89791-984-X},
keywords = {cache memory,data layout,matrix multiply,printed,strassen's algorithm},
mendeley-tags = {printed},
month = nov,
pages = {1--14},
title = {{Tuning Strassen's matrix multiplication for memory efficiency}},
url = {http://dl.acm.org/ft\_gateway.cfm?id=509094\&type=html},
year = {1998}
}
@inproceedings{Tu2010,
author = {Tu, Shikui and Xu, Lei and Chen, Runsheng},
title = {{A binary matrix factorization algorithm for protein complex prediction}},
booktitle = {2010 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)},
publisher = {IEEE},
month = dec,
year = {2010},
pages = {113--118},
doi = {10.1109/BIBMW.2010.5703783},
isbn = {978-1-4244-8303-7},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5703783},
abstract = {We propose a binary matrix factorization (BMF) algorithm under the Bayesian Ying-Yang (BYY) harmony learning, to detect protein complexes by clustering the proteins which share similar interactions through factorizing the binary adjacent matrix of the protein-protein interaction (PPI) network. The proposed BYY-BMF algorithm automatically determines the cluster number while this number is usually specified for most existing BMF algorithms. Also, BYY-BMF's clustering results does not depend on any parameters or thresholds, unlike the Markov Cluster Algorithm (MCL) that relies on a so-called inflation parameter. On synthetic PPI networks, the predictions evaluated by the known annotated complexes indicate that BYY-BMF is more robust than MCL for most cases. Moreover, BYY-BMF obtains a better balanced prediction accuracies than MCL and a spectral analysis method, on real PPI networks from the MIPS and DIP databases.},
keywords = {not printed},
mendeley-tags = {not printed},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Tu, Xu, Chen - 2010 - A binary matrix factorization algorithm for protein complex prediction.pdf:pdf}
}
@article{Ullah2010,
abstract = {Background: The protein folding problem remains one of the most challenging open problems in computational biology. Simplified models in terms of lattice structure and energy function have been proposed to ease the computational hardness of this optimization problem. Heuristic search algorithms and constraint programming are two common techniques to approach this problem. The present study introduces a novel hybrid approach to simulate the protein folding problem using constraint programming technique integrated within local search. Results: Using the face-centered-cubic lattice model and 20 amino acid pairwise interactions energy function for the protein folding problem, a constraint programming technique has been applied to generate the neighbourhood conformations that are to be used in generic local search procedure. Experiments have been conducted for a few small and medium sized proteins. Results have been compared with both pure constraint programming approach and local search using well-established local move set. Substantial improvements have been observed in terms of final energy values within acceptable runtime using the hybrid approach. Conclusion: Constraint programming approaches usually provide optimal results but become slow as the problem size grows. Local search approaches are usually faster but do not guarantee optimal solutions and tend to stuck in local minima. The encouraging results obtained on the small proteins show that these two approaches can be combined efficiently to obtain better quality solutions within acceptable time. It also encourages future researchers on adopting hybrid techniques to solve other hard optimization problems.},
author = {Ullah, Abu Dayem and Steinh\"{o}fel, Kathleen},
institution = {King's College London, Department of Computer Science, London WC2R 2LS, UK. abu.dayem\_ullah@kcl.ac.uk},
journal = {BMC Bioinformatics},
keywords = {not printed},
mendeley-tags = {not printed},
number = {Suppl 1},
pages = {S39},
publisher = {BioMed Central},
title = {{A hybrid approach to protein folding problem integrating constraint programming with local search}},
url = {http://calcium.dcs.kcl.ac.uk/1379/},
volume = {11},
year = {2010}
}
@phdthesis{VanDongen2000,
author = {van Dongen, Stijn Marinus},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/van Dongen - 2000 - Graph Clustering by Flow Simulation.pdf:pdf},
keywords = {Flow Simulation,Graph Theory,printed},
mendeley-tags = {Flow Simulation,Graph Theory,printed},
pages = {173},
school = {University of Utrecht},
title = {{Graph Clustering by Flow Simulation}},
year = {2000}
}
@techreport{VanDongen2000a,
abstract = {In\~{}[1] a cluster algorithm for graphs was introduced called the Markov cluster algorithm or MCL\~{}algorithm. The algorithm is based on simulation of (stochastic) flow in graphs by means of alternation of two operators, expansion and inflation. The results in\~{}[2] establish an intrinsic relationship between the corresponding algebraic process (MCL\~{}process) and cluster structure in the iterands and the limits of the process. Several kinds of experiments conducted with the MCL\~{}algorithm are described here. Test cases with varying homogeneity characteristics are used to establish some of the particular strengths and weaknesses of the algorithm. In general the algorithm performs well, except for graphs which are very homogeneous (such as weakly connected grids) and for which the natural cluster diameter (i.e. the diameter of a subgraph induced by a natural cluster) is large. This can be understood in terms of the flow characteristics of the MCL\~{}algorithm and the heuristic on which the algorithm is grounded. A generic performance criterion for clusterings of weighted graphs is derived, by a stepwise refinement of a simple and appealing criterion for simple graphs. The most refined criterion uses a particular Schur convex function, several properties of which are established. A metric is defined on the space of partitions, which is useful for comparing different clusterings of the same graph. The metric is compared with the metric known as the equivalence mismatch coefficient. The performance criterion and the metric are used for the quantitative measurement of experiments conducted with the MCL\~{}algorithm on randomly generated test graphs with 10000 nodes. Scaling the MCL\~{}algorithm requires a regime of pruning the stochastic matrices which need to be computed. The effect of pruning on the quality of the retrieved clusterings is also investigated. [1] A cluster algorithm for graphs. 
Technical report INS-R0010, National Research Institute for Mathematics and Computer Science in the Netherlands, Amsterdam, 2000. [2] A stochastic uncoupling process for graphs. Technical report INS-R0011, National Research Institute for Mathematics and Computer Science in the Netherlands, Amsterdam, 2000.},
author = {van Dongen, Stijn Marinus},
citeseer-url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.26.9783},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/van Dongen - 2000 - Performance criteria for graph clustering and Markov cluster experiments.pdf:pdf},
institution = {National Research Institute for Mathematics and Computer Science},
keywords = {printed},
mendeley-tags = {printed},
month = may,
pages = {36},
title = {{Performance criteria for graph clustering and Markov cluster experiments}},
url = {http://dl.acm.org/citation.cfm?id=868979},
year = {2000}
}
@article{VanDongen2012,
abstract = {MCL is a general purpose cluster algorithm for both weighted and unweighted networks. The algorithm utilises network topology as well as edge weights, is highly scalable and has been applied in a wide variety of bioinformatic methods. In this chapter, we give protocols and case studies for clustering of networks derived from, respectively, protein sequence similarities and gene expression profile correlations.},
author = {van Dongen, Stijn Marinus and Abreu-Goodger, Cei},
doi = {10.1007/978-1-61779-361-5_15},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/van Dongen, Abreu-Goodger - 2012 - Using MCL to extract clusters from networks.pdf:pdf},
issn = {1940-6029},
journal = {Methods in molecular biology},
month = jan,
pages = {281--95},
pmid = {22144159},
title = {{Using MCL to extract clusters from networks.}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/22144159},
volume = {804},
year = {2012}
}
@article{Vastenhouw2005,
abstract = {A new method is presented for distributing data in sparse matrix-vector multiplication. The method is two-dimensional, tries to minimize the true communication volume, and also tries to spread the computation and communication work evenly over the processors. The method starts with a recursive bipartitioning of the sparse matrix, each time splitting a rectangular matrix into two parts with a nearly equal number of nonzeros. The communication volume caused by the split is minimized. After the matrix partitioning, the input and output vectors are partitioned with the objective of minimizing the maximum communication volume per processor. Experimental results of our implementation, Mondriaan, for a set of sparse test matrices show a reduction in communication volume compared to one-dimensional methods, and in general a good balance in the communication work. Experimental timings of an actual parallel sparse matrix-vector multiplication on an SGI Origin 3800 computer show that a sufficiently large reduction in communication volume leads to savings in execution time.},
author = {Vastenhouw, Brendan and Bisseling, Rob H.},
doi = {10.1137/S0036144502409019},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Vastenhouw, Bisseling - 2005 - A Two-Dimensional Data Distribution Method for Parallel Sparse Matrix-Vector Multiplication.pdf:pdf},
issn = {00361445},
journal = {SIAM Review},
keywords = {matrix partitioning,matrix vector multiplication,parallel computing,recursive bipartitioning,sparse matrix},
number = {1},
pages = {67},
publisher = {Society for Industrial and Applied Mathematics},
title = {{A Two-Dimensional Data Distribution Method for Parallel Sparse Matrix-Vector Multiplication}},
url = {http://link.aip.org/link/SIREAD/v47/i1/p67/s1\&Agg=doi},
volume = {47},
year = {2005}
}
@article{Vignesh2010,
abstract = {In this we present an efficient solution for eliminating false positives in intrusion detection systems using a parallelized version of Genetic Algorithm. Genetic algorithm uses selection, mutation and crossover operations eliminating most of the false positives in a reasonable time. Almost all existing versions are sequential without exploiting the capabilities of newer multiprocessors or distributed systems. By parallelizing genetic operations in the context of intrusion detection systems we reduce the total complexities. This parallelized approach gives better solution than sequential one by taking advantage of the parallel architecture. We propose the use of cache oblivious technique in our algorithm to provide efficient memory transfers. The complexity of this algorithm is O((N/B) logM/B N1/3/3 + N1/ 3) which is very much lesser when compared to other sorting algorithms.},
author = {Vignesh, R and Ganesh, B and Aarthi, Gupta and Iyswarya, N},
journal = {International Journal of Computer Applications},
keywords = {Cache Oblivious Algorithms,Clustering,IDS,not printed},
mendeley-tags = {not printed},
number = {11},
pages = {1--5},
title = {{A Cache Oblivious based GA Solution for Clustering Problem in IDS}},
volume = {1},
year = {2010}
}
@article{Vitter2006,
author = {Vitter, Jeffrey Scott},
doi = {10.1561/0400000014},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Vitter - 2006 - Algorithms and Data Structures for External Memory.pdf:pdf},
issn = {1551-305X},
journal = {Foundations and Trends in Theoretical Computer Science},
number = {4},
pages = {305--474},
title = {{Algorithms and Data Structures for External Memory}},
url = {http://www.nowpublishers.com/product.aspx?product=TCS\&doi=0400000014},
volume = {2},
year = {2006}
}
@article{Vlasblom2009,
abstract = {Genome scale data on protein interactions are generally represented as large networks, or graphs, where hundreds or thousands of proteins are linked to one another. Since proteins tend to function in groups, or complexes, an important goal has been to reliably identify protein complexes from these graphs. This task is commonly executed using clustering procedures, which aim at detecting densely connected regions within the interaction graphs. There exists a wealth of clustering algorithms, some of which have been applied to this problem. One of the most successful clustering procedures in this context has been the Markov Cluster algorithm (MCL), which was recently shown to outperform a number of other procedures, some of which were specifically designed for partitioning protein interactions graphs. A novel promising clustering procedure termed Affinity Propagation (AP) was recently shown to be particularly effective, and much faster than other methods for a variety of problems, but has not yet been applied to partition protein interaction graphs.},
author = {Vlasblom, James and Wodak, Shoshana J},
doi = {10.1186/1471-2105-10-99},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Vlasblom, Wodak - 2009 - Markov clustering versus affinity propagation for the partitioning of protein interaction graphs.pdf:pdf},
issn = {1471-2105},
journal = {BMC Bioinformatics},
keywords = {Algorithms,Cluster Analysis,Computational Biology,Computational Biology: methods,Databases,Markov Chains,Protein,Protein Interaction Mapping,Protein Interaction Mapping: methods,Proteins,Proteins: chemistry,Proteins: metabolism,Saccharomyces cerevisiae,Saccharomyces cerevisiae: metabolism,printed},
mendeley-tags = {printed},
month = jan,
pages = {99},
pmid = {19331680},
title = {{Markov clustering versus affinity propagation for the partitioning of protein interaction graphs.}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2682798\&tool=pmcentrez\&rendertype=abstract},
volume = {10},
year = {2009}
}
@book{Wang2010,
address = {Boston, MA},
author = {Wang, Haixun and Aggarwal, Charu C},
doi = {10.1007/978-1-4419-6045-0},
editor = {Aggarwal, Charu C. and Wang, Haixun},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Wang, Aggarwal - 2010 - Managing and Mining Graph Data.pdf:pdf},
isbn = {978-1-4419-6044-3},
pages = {620},
publisher = {Springer US},
series = {Advances in Database Systems},
title = {{Managing and Mining Graph Data}},
url = {http://www.springerlink.com/index/10.1007/978-1-4419-6045-0},
volume = {40},
year = {2010}
}
@inproceedings{Wang2010a,
abstract = {In this paper, we propose an approach for significantly improving the performance of parallel matrix-matrix multiplication using a GPU-accelerated cluster. For one node, we implement a CPUs-GPU parallel double-precision general matrix-matrix multiplication (dgemm) operation and achieve a performance improvement of 32\% as compared to the GPU-only case and 56\% as compared to the CPUs-only case. For the entire cluster, we use the overlap GPU acceleration solution to high-performance Linpack (HPL), which eliminates the close dependency between the LU decomposition and the dgemm operation, and achieve a performance improvement of 5.72\% as compared to the flat GPU acceleration case.},
annote = {From Duplicate 2: Parallel Matrix-Matrix Multiplication Based on HPL with a GPU-Accelerated PC Cluster - Wang, Qin; Ohmura, Junichi; Axida, Shan; Miyoshi, Takefumi; Irie, Hidetsugu; Yoshinaga, Tsutomu},
author = {Wang, Qin and Ohmura, Junichi and Axida, Shan and Miyoshi, Takefumi and Irie, Hidetsugu and Yoshinaga, Tsutomu},
booktitle = {2010 1st International Conference on Networking and Computing},
doi = {10.1109/IC-NC.2010.39},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Wang et al. - 2010 - Parallel Matrix-Matrix Multiplication Based on HPL with a GPU-Accelerated PC Cluster.pdf:pdf},
isbn = {978-1-4244-8918-3},
month = nov,
pages = {243--248},
publisher = {IEEE},
title = {{Parallel Matrix-Matrix Multiplication Based on HPL with a GPU-Accelerated PC Cluster}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5695242 http://ieeexplore.ieee.org/xpl/freeabs\_all.jsp?arnumber=5695242},
year = {2010}
}
@article{Wang2010b,
author = {Wang, Xuan},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Wang - 2010 - Clustering in the Cloud Clustring Algorithms to Hadoop MapReduce Framework.pdf:pdf},
journal = {Spring},
title = {{Clustering in the Cloud: Clustring Algorithms to Hadoop Map/Reduce Framework}},
url = {http://ecommons.txstate.edu/cgi/viewcontent.cgi?article=1018\&amp;context=cscitrep},
year = {2010}
}
@misc{WebApplikationer,
author = {{DR TU Web Applikationer}},
keywords = {Danmarks Radios internet radio afspiller til live},
title = {{DR internet radio afspiller}},
url = {http://www.dr.dk/radio/player/?psdb/sommergaesten-pa-p1-lektor-mehmet-umit-necef}
}
@inproceedings{Willcock2006,
address = {New York, New York, USA},
author = {Willcock, Jeremiah and Lumsdaine, Andrew},
booktitle = {Proceedings of the 20th annual international conference on Supercomputing - ICS '06},
doi = {10.1145/1183401.1183444},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Willcock, Lumsdaine - 2006 - Accelerating sparse matrix computations via data compression.pdf:pdf},
isbn = {1595932828},
keywords = {data compression,high-performance computing,memory bandwidth,printed,sparse matrix},
mendeley-tags = {printed},
month = jun,
pages = {307},
publisher = {ACM Press},
title = {{Accelerating sparse matrix computations via data compression}},
url = {http://dl.acm.org/citation.cfm?id=1183401.1183444},
year = {2006}
}
@article{Wills1997,
abstract = {This paper describes Pica, a fine-grain, message-passing architecture designed to efficiently support high-throughput, low-memory parallel applications, such as image processing, object recognition, and data compression. By specializing the processor and reducing local memory (4,096 36-bit words), multiple nodes can be implemented on a single chip. This allows high-performance systems for high-throughput applications to be realized at lower cost. The architecture minimizes overhead for basic parallel operations. An operand-addressed context cache and round-robin task manager support fast task swapping. Fixed-sized activation contexts simplify storage management. Word-tag synchronization bits provide low-cost synchronization. Several applications have been developed for this architecture, including thermal relaxation, matrix multiplication, JPEG image compression, and Positron Emission Tomography image reconstruction. These applications have been executed using an instrumented instruction-level simulator. The results of these experiments and an evaluation of Pica's architectural features are presented},
author = {Wills, Scott and Cat, Huy H and Cruz-Rivera, Jos\'{e} and Lacy, Stephen W. and Baker, James M and Eble, John C and Lopez-Lagunas, Abelardo and Hopper, Michael},
doi = {10.1109/71.629488},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Wills et al. - 1997 - High-throughput, low-memory applications on the Pica architecture.pdf:pdf},
issn = {10459219},
journal = {IEEE Transactions on Parallel and Distributed Systems},
number = {10},
pages = {1055--1067},
title = {{High-throughput, low-memory applications on the Pica architecture}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=629488},
volume = {8},
year = {1997}
}
@book{Wu2003,
abstract = {This volume contains recent developments in clustering and information retrieval, including clustering algorithms, evaluation methodologies, and architectures for information retrieval. It provides a survey of the state-of-the-art research in clustering and information retrieval. 
Audience: This volume is suitable for professionals and researchers in data mining and information retrieval. It is also appropriate for use in graduate courses.
},
editor = {Wu, Weili and Xiong, Hui and Shekhar, Shashi},
publisher = {Springer Verlag GMBH},
title = {{Clustering and Information Retrieval}},
year = {2003}
}
@book{Wu2009,
doi = {10.1201/9781420089653},
editor = {Wu, Xindong and Kumar, Vipin},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Unknown - 2009 - The Top Ten Algorithms in Data Mining.pdf:pdf},
isbn = {978-1-4200-8964-6},
month = apr,
publisher = {Chapman and Hall/CRC},
series = {Chapman \& Hall/CRC Data Mining and Knowledge Discovery Series},
title = {{The Top Ten Algorithms in Data Mining}},
url = {http://www.crcnetbase.com/doi/book/10.1201/9781420089653},
volume = {9},
year = {2009}
}
@inproceedings{Wu1997,
abstract = {In this paper, we start with presenting and defining scalability of algorithm-architecture combinations based on the fixed ratio of computation time to communication cost, analyze the performance and scalability of a number of parallel matrix multiplication algorithms, and compare them with the related work. The performance analysis and the analytical scalability expressions for these algorithms show that our scalability metric is better than the isoefficiency metric.},
address = {Berlin/Heidelberg},
author = {Wu, Xingfu},
booktitle = {3rd Annual International Conference, Cocoon},
doi = {10.1007/BFb0045065},
editor = {Jiang, Tao and Lee, D. T.},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Wu - 1997 - An Approach to Scalability of Parallel Matrix Multiplication Algorithms.pdf:pdf},
isbn = {3-540-63357-X},
keywords = {Computer Science,printed},
mendeley-tags = {printed},
pages = {492--501},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
title = {{An Approach to Scalability of Parallel Matrix Multiplication Algorithms}},
url = {http://www.springerlink.com/content/4635384j86l85188/},
volume = {1276},
year = {1997}
}
@article{Xu2007,
abstract = {Recent spectral clustering methods are a popular and powerful technique for data clustering. These methods need to solve the eigenproblem whose computational complexity is $O(n^{3})$, where $n$ is the number of data samples. In this paper, a non-eigenproblem based clustering method is proposed to deal with the clustering problem. Its performance is comparable to the spectral clustering algorithms but it is more efficient with computational complexity $O(n^{2})$. We show that with a transitive distance and an observed property, called K-means duality, our algorithm can be used to handle data sets with complex cluster shapes, multi-scale clusters, and noise. Moreover, no parameters except the number of clusters need to be set in our algorithm.},
archivePrefix = {arXiv},
arxivId = {0711.3594},
author = {Xu, Chunjing and Liu, Jianzhuang and Tang, Xiaoou},
eprint = {0711.3594},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Xu, Liu, Tang - 2007 - Clustering with Transitive Distance and K-Means Duality.pdf:pdf},
month = nov,
pages = {13},
title = {{Clustering with Transitive Distance and K-Means Duality}},
url = {http://arxiv.org/abs/0711.3594},
year = {2007}
}
@book{Xu2008,
abstract = {This is the first book to take a truly comprehensive look at clustering. It begins with an introduction to cluster analysis and goes on to explore: proximity measures; hierarchical clustering; partition clustering; neural network-based clustering; kernel-based clustering; sequential data clustering; large-scale data clustering; data visualization and high-dimensional data clustering; and cluster validation. The authors assume no previous background in clustering and their generous inclusion of examples and references help make the subject matter comprehensible for readers of varying levels and backgrounds.},
address = {Hoboken, NJ, USA},
author = {Xu, Rui and Wunsch, Donald C.},
doi = {10.1002/9780470382776},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Xu, Wunsch - 2008 - Clustering.pdf:pdf},
isbn = {9780470382776},
month = oct,
publisher = {John Wiley \& Sons, Inc.},
title = {{Clustering}},
url = {http://doi.wiley.com/10.1002/9780470382776},
year = {2008}
}
@inproceedings{Yao2010,
author = {Yao, Bin and Li, Feifei and Kumar, Piyush},
booktitle = {2010 IEEE 26th International Conference on Data Engineering (ICDE)},
doi = {10.1109/ICDE.2010.5447837},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Yao, Li, Kumar - 2010 - K Nearest Neighbor Queries and KNN-Joins in Large Relational Databases ( Almost ) for Free.pdf:pdf},
isbn = {9781424454440},
pages = {4--15},
publisher = {IEEE},
title = {{K Nearest Neighbor Queries and KNN-Joins in Large Relational Databases (Almost) for Free}},
url = {http://dblp.uni-trier.de/db/conf/icde/icde2010.html\#YaoLK10},
year = {2010}
}
@inproceedings{Yao2009,
abstract = {Given a set of points P and a query point q, the reverse furthest neighbor (Rfn) query fetches the set of points p $\in$ P such that q is their furthest neighbor among all points in P $\cup$ \{q\}. This is the monochromatic Rfn (Mrfn) query. Another interestin.....},
author = {Yao, Bin and Li, Feifei and Kumar, Piyush},
booktitle = {2009 IEEE 25th International Conference on Data Engineering},
doi = {10.1109/ICDE.2009.62},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Yao, Li, Kumar - 2009 - Reverse Furthest Neighbors in Spatial Databases.pdf:pdf},
isbn = {9781424434220},
issn = {10844627},
pages = {664--675},
publisher = {IEEE},
title = {{Reverse Furthest Neighbors in Spatial Databases}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4812444},
year = {2009}
}
@article{Kumar2010,
abstract = {We present a simple first-order approximation algorithm for the support vector classification problem. Given a pair of linearly separable data sets and ϵ ∈ (0, 1), the proposed algorithm computes a separating hyperplane whose margin is within a factor of (1 − ϵ) of that of the maximum-margin separating hyperplane. We discuss how our algorithm can be extended to nonlinearly separable and inseparable data sets. The running time of our algorithm is linear in the number of data points and in 1/ϵ. In particular, the number of support vectors computed by the algorithm is bounded above by O($\zeta$/ϵ) for all sufficiently small ϵ > 0, where $\zeta$ is the square of the ratio of the distances between the farthest and closest pairs of points in the two data sets. Furthermore, we establish that our algorithm exhibits linear convergence. Our computational experiments, presented in the online supplement, reveal that the proposed algorithm performs quite well on standard data sets in comparison with other first-order algorithms. We adopt the real number model of computation in our analysis.},
author = {Yildirim, E Alper and Kumar, Piyush},
doi = {10.1287/ijoc.1100.0412},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Yildirim, Kumar - 2010 - A Linearly Convergent Linear-Time First-Order Algorithm for Support Vector Classification with a Core Set Result.pdf:pdf},
issn = {1091-9856},
journal = {INFORMS Journal on Computing},
keywords = {Frank--Wolfe algorithm,approximation algorithms,core sets,linear convergence,not printed,support vector classification,support vector machines},
mendeley-tags = {not printed},
month = sep,
number = {3},
pages = {377--391},
title = {{A Linearly Convergent Linear-Time First-Order Algorithm for Support Vector Classification with a Core Set Result}},
url = {http://joc.journal.informs.org/cgi/doi/10.1287/ijoc.1100.0412},
volume = {23},
year = {2010}
}
@inproceedings{Yoo2010,
abstract = {Graphs have gained a lot of attention in recent years and have been a focal point in many emerging disciplines such as web mining, computational biology, social network analysis, and national security, just to name a few. These so-called scale-free graphs in the real world have very complex structure and their sizes already have reached unprecedented scale. Furthermore, most of the popular graph algorithms are computationally very expensive, making scalable graph analysis even more challenging. To scale these graph algorithms, which have different run-time characteristics and resource requirements than traditional scientific and engineering applications, we may have to adopt vastly different computing techniques than the current state-of-art. In this talk, I will discuss some of the findings from our studies on the performance and scalability of graph algorithms on various computing environments at LLNL, hoping to shed some light on the challenges in scaling large graph algorithms. Andy Yoo is a computer scientist in the Center for Applied Scientific Computing (CASC). His current research interests are scalable graph algorithms, high performance computing, large-scale data management, and performance evaluation. He has worked on the large graph problems since 2004. In 2005, he developed a scalable graph search algorithm and demonstrated it by searching a graph with billions of edges on IBM BlueGene/L, then the largest and fastest supercomputer. Andy was nominated for 2005 Gordon Bell award for this work. He is currently working on finding right combination of architecture, systems, and programming model to run large graph algorithms.},
author = {Yoo, Andy},
booktitle = {2010 43rd Hawaii International Conference on System Sciences},
doi = {10.1109/HICSS.2010.325},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Yoo - 2010 - Scalable Analysis of Massive Graphs on A Parallel Data Flow System Dataflow for Large Graph Analysis and Testbed.pdf:pdf},
isbn = {978-1-4244-5509-6},
keywords = {google tech talk mathematics web mining,printed,scalable analysis massive graphs a parallel},
mendeley-tags = {printed},
pages = {1--6},
publisher = {IEEE},
title = {{Scalable Analysis of Massive Graphs on A Parallel Data Flow System Dataflow for Large Graph Analysis and Testbed}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5428331 http://www.youtube.com/watch?v=PBLgUBGWcz8},
year = {2010}
}
@article{Yuster2005,
abstract = {A new space-efficient representation for sparse matrices is introduced and a fast sparse matrix multiplication algorithm based on the new representation is presented. The scheme is very efficient when the nonzero elements of a sparse matrix are partially or fully adjacent to one another as in band or triangular matrices. The space complexity of the new representation is better than that of existing algorithms when the number of sets of adjacent nonzero elements, called segments, is less than two thirds of the total number of nonzero elements. The time complexity of the associated sparse matrix multiplication algorithm is also better or even much better than that of existing schemes depending on the number of segments in the factor matrices.},
author = {Yuster, Raphael and Zwick, Uri},
doi = {10.1145/1077464.1077466},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Yuster, Zwick - 2005 - Fast sparse matrix multiplication.pdf:pdf},
issn = {15496325},
journal = {ACM Transactions on Algorithms},
keywords = {printed},
mendeley-tags = {printed},
number = {1},
pages = {2--13},
publisher = {ACM},
title = {{Fast sparse matrix multiplication}},
url = {http://portal.acm.org/citation.cfm?doid=1077464.1077466},
volume = {1},
year = {2005}
}
@article{Yzelman2009,
annote = {Deals with matrix-vector multiplication. 
But read the recursive bipartitioning and cache-oblivious parts},
author = {Yzelman, A. N. and Bisseling, Rob H.},
doi = {10.1137/080733243},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Yzelman, Bisseling - 2009 - Cache-Oblivious Sparse Matrix–Vector Multiplication by Using Sparse Matrix Partitioning Methods.pdf:pdf},
issn = {10648275},
journal = {SIAM Journal on Scientific Computing},
keywords = {cache-oblivious,matrix–vector multiplication,parallel computing,printed,recursive bipartitioning,sparse matrix},
language = {en},
mendeley-tags = {printed},
month = jul,
number = {4},
pages = {3128},
title = {{Cache-Oblivious Sparse Matrix–Vector Multiplication by Using Sparse Matrix Partitioning Methods}},
url = {http://link.aip.org/link/?SJOCE3/31/3128/1},
volume = {31},
year = {2009}
}
@incollection{Zekri2008,
address = {Berlin, Heidelberg},
author = {Zekri, Ahmed S. and Sedukhin, Stanislav G.},
booktitle = {High-Performance Computing},
doi = {10.1007/978-3-540-77704-5},
editor = {Labarta, Jes\'{u}s and Joe, Kazuki and Sato, Toshinori},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Zekri, Sedukhin - 2008 - Computationally efficient parallel matrix-matrix multiplication on the torus.pdf:pdf},
isbn = {978-3-540-77703-8},
keywords = {3D torus,array processor,matrix-matrix multiplication,printed,space-time mapping},
mendeley-tags = {printed},
month = sep,
pages = {219--226},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Computationally efficient parallel matrix-matrix multiplication on the torus}},
url = {http://www.springerlink.com/index/10.1007/978-3-540-77704-5},
volume = {4759},
year = {2008}
}
@unpublished{Zhang,
author = {Zhang, Xialon},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Zhang - 2006 - Online Algorithms.pdf:pdf},
keywords = {Clustering,Lecture Notes},
note = {Unpublished lecture notes},
pages = {15},
title = {{Online Algorithms}},
year = {2006}
}
@article{Zheng2008,
abstract = {Protein-protein interactions (PPIs) play crucial roles in virtually every aspect of cellular function within an organism. One important objective of modern biology is the extraction of functional modules, such as protein complexes from global protein interaction networks. This paper describes how seven genomic features and four experimental interaction data sets were combined using a Bayesian-networks-based data integration approach to infer PPI networks in yeast. Greater coverage and higher accuracy were achieved than in previous high-throughput studies of PPI networks in yeast. A Markov clustering algorithm was then used to extract protein complexes from the inferred protein interaction networks. The quality of the computed complexes was evaluated using the hand-curated complexes from the Munich Information Center for Protein Sequences database and gene-ontology-driven semantic similarity. The results indicated that, by integrating multiple genomic information sources, a better clustering result was obtained in terms of both statistical measures and biological relevance.},
author = {Zheng, Huiru and Wang, Haiying and Glass, David H},
institution = {School of Computing and Mathematics, University of Ulster, Newtownabbey, UK. h.zheng@ulster.ac.uk},
journal = {IEEE Transactions on Systems, Man, and Cybernetics, Part B (Cybernetics)},
keywords = {biological,chromosome mapping,chromosome mapping methods,computer simulation,databases,models,protein,protein interaction mapping,protein interaction mapping methods,proteome,proteome metabolism,signal transduction,signal transduction physiology,systems integration},
number = {1},
pages = {5--16},
pmid = {18270078},
title = {{Integration of genomic data for inferring protein complexes from global protein-protein interaction networks}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/18270078},
volume = {38},
year = {2008}
}
@inproceedings{Zhou2010,
abstract = {The frequent items problem is to process a stream as a stream of items and find all items occurring more than a given fraction of the time. It is one of the most heavily studied problems in data stream mining, dating back to the 1980s. Aiming at higher false positive rate of the Space-Saving algorithm, an LRU-based (Least Recently Used, LRU) improved algorithm with low frequency item pre-eliminated is proposed. Accuracy, stability and adaptability of the improved algorithm have been apparently enhanced. Experimental results indicate that the algorithm can not only be used to find the frequent items, and can be used to estimate the frequency of them precisely. The improved algorithm can be used for online processing both high-speed network packet stream and backbone NetFlow stream.},
author = {Zhou, Jun and Chen, Ming and Xiong, Huan},
booktitle = {2010 2nd International Workshop on Database Technology and Applications},
doi = {10.1109/DBTA.2010.5659027},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Zhou, Chen, Xiong - 2010 - A More Accurate Space Saving Algorithm for Finding the Frequent Items.pdf:pdf},
isbn = {978-1-4244-6975-8},
month = nov,
pages = {1--5},
publisher = {IEEE},
title = {{A More Accurate Space Saving Algorithm for Finding the Frequent Items}},
url = {http://ieeexplore.ieee.org/xpls/abs\_all.jsp?arnumber=5659027},
year = {2010}
}
@article{Zhuo2007,
abstract = {The abundant hardware resources on current reconfigurable computing systems provide new opportunities for high-performance parallel implementations of scientific computations. In this paper, we study designs for floating-point matrix multiplication, a fundamental kernel in a number of scientific applications, on reconfigurable computing systems. We first analyze design trade-offs in implementing this kernel. These trade-offs are caused by the inherent parallelism of matrix multiplication and the resource constraints, including the number of configurable slices, the size of on-chip memory, and the available memory bandwidth. We propose three parameterized algorithms which can be tuned according to the problem size and the available hardware resources. Our algorithms employ linear array architecture with simple control logic. This architecture effectively utilizes the available resources and reduces routing complexity. The processing elements (PEs) used in our algorithms are modular so that it is easy to embed floating-point units into them. Experimental results on a Xilinx Virtex-ll Pro XC2VP100 show that our algorithms achieve good scalability and high sustained GFLOPS performance. We also implement our algorithms on Cray XD1. XD1 is a high-end reconfigurable computing system that employs both general-purpose processors and reconfigurable devices. Our algorithms achieve a sustained performance of 2.06 GFLOPS on a single node of XD1},
author = {Zhuo, Ling and Prasanna, Viktor},
doi = {10.1109/TPDS.2007.1001},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Zhuo, Prasanna - 2007 - Scalable and Modular Algorithms for Floating-Point Matrix Multiplication on Reconfigurable Computing Systems.pdf:pdf},
issn = {1045-9219},
journal = {IEEE Transactions on Parallel and Distributed Systems},
keywords = {printed},
mendeley-tags = {printed},
month = apr,
number = {4},
pages = {433--448},
title = {{Scalable and Modular Algorithms for Floating-Point Matrix Multiplication on Reconfigurable Computing Systems}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4118686},
volume = {18},
year = {2007}
}
@article{Zinger2009,
abstract = {Understanding how microbial community structure and diversity respond to environmental conditions is one of the main challenges in environmental microbiology. However, there is often confusion between determining the phylogenetic structure of microbial communities and assessing the distribution and diversity of molecular operational taxonomic units (MOTUs) in these communities. This has led to the use of sequence analysis tools such as multiple alignments and hierarchical clustering that are not adapted to the analysis of large and diverse data sets and not always justified for characterization of MOTUs. Here, we developed an approach combining a pairwise alignment algorithm and graph partitioning by using MCL (Markov clustering) in order to generate discrete groups for nuclear large-subunit rRNA gene and internal transcript spacer 1 sequence data sets obtained from a yearly monitoring study of two spatially close but ecologically contrasting alpine soils (namely, early and late snowmelt locations). We compared MCL with a classical single-linkage method (Ccomps) and showed that MCL reduced bias such as the chaining effect. Using MCL, we characterized fungal communities in early and late snowmelt locations. We found contrasting distributions of MOTUs in the two soils, suggesting that there is a high level of habitat filtering in the assembly of alpine soil fungal communities. However, few MOTUs were specific to one location.},
author = {Zinger, L. and Coissac, E. and Choler, P. and Geremia, R. A.},
doi = {10.1128/AEM.00748-09},
issn = {1098-5336},
journal = {Applied and Environmental Microbiology},
keywords = {Animals,Biodiversity,Cluster Analysis,Computational Biology,DNA,Fungal,Fungal: chemistry,Fungal: genetics,Fungi,Fungi: classification,Fungi: genetics,Fungi: isolation \& purification,Genes,Molecular Sequence Data,Phylogeny,RNA,Ribosomal,Ribosomal: chemistry,Ribosomal: genetics,Sequence Analysis,Soil Microbiology,rRNA},
month = sep,
number = {18},
pages = {5863--5870},
pmid = {19617385},
title = {{Assessment of microbial communities by graph partitioning in a study of soil fungi in two Alpine meadows}},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2747849\&tool=pmcentrez\&rendertype=abstract},
volume = {75},
year = {2009}
}
@article{Zuckerman2008,
abstract = {Multicore systems are becoming ubiquituous in scientific computing. As performance libraries are adapted to such systems, the difficulty to extract the best performance out of them is quite high. Indeed, performance libraries such as Intels MKL, while performing very well on unicore architectures, see their behaviour degrade when used on multicore systems. Moreover, even multicore systems show wide differences among each other (presence of shared caches, memory bandwidth, etc.) We propose a systematic method to improve the parallel execution of matrix multiplication, through the study of the behavior of unicore DGEMM kernels in MKL, as well as various other criteria. We show that our fine-tuning can out-perform the parallel DGEMM of MKL, with performance gains sometimes up to a factor of two.},
author = {Zuckerman, Stephane and P\'{e}rache, Marc and Jalby, William},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Zuckerman, P\'{e}rache, Jalby - 2008 - Fine Tuning Matrix Multiplications on Multicore.pdf:pdf},
journal = {Lecture Notes in Computer Science},
keywords = {blas,cache coherency,multicore},
pages = {30--41},
publisher = {Springer-Verlag New York Inc},
title = {{Fine Tuning Matrix Multiplications on Multicore}},
url = {http://books.google.com/books?hl=en\&amp;lr=\&amp;id=cNwZ1snBYQYC\&amp;oi=fnd\&amp;pg=PA30\&amp;dq=Fine+Tuning+Matrix+Multiplications+on+Multicore\&amp;ots=qwuSUVuFX8\&amp;sig=QhH8o5fJ42d75sD9vQuhPo07HGw},
volume = {5374},
year = {2008}
}
@article{Zwick2002,
author = {Zwick, Uri},
doi = {10.1145/567112.567114},
file = {:home/andreas/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Zwick - 2002 - All pairs shortest paths using bridging sets and rectangular matrix multiplication.pdf:pdf},
issn = {00045411},
journal = {Journal of the ACM},
keywords = {Matrix multiplication,shortest paths},
month = may,
number = {3},
pages = {289--317},
title = {{All pairs shortest paths using bridging sets and rectangular matrix multiplication}},
url = {http://dl.acm.org/citation.cfm?id=567112.567114},
volume = {49},
year = {2002}
}
