% This file was created with JabRef 2.7.2.
% Encoding: Cp1252

@book{AG09,
  author    = {Agarwal, B. B. and Gupta, M. and Tayal, S. P.},
  title     = {Software Engineering and Testing: An Introduction},
  publisher = {Jones and Bartlett Publishers},
  year      = {2009},
  owner     = {haalmuta},
  timestamp = {2012.04.09}
}

@BOOK{AJ11,
  title = {Rigorous Software Development: An Introduction to Program Verification},
  publisher = {Springer},
  year = {2011},
  author = {Almeida, J. and Frade, M. and Pinto, J. and Sousa, S.},
  address = {New York, NY},
  owner = {haalmuta},
  timestamp = {2012.04.10}
}

@ELECTRONIC{ARWU,
  author = {{ARWU}},
  year = {2010},
  title = {Academic Ranking of World Universities 2010},
  organization = {Academic Ranking of World Universities},
  url = {http://www.arwu.org/ARWU2010.jsp},
  owner = {haalmuta},
  timestamp = {2012.04.09}
}

@ARTICLE{BSSZ,
  author = {Baharom, S. and Shukur, Z.},
  title = {An experimental assessment of module documentation-based testing},
  journal = {Information and Software Technology},
  year = {2011},
  volume = {53},
  pages = {747--760},
  number = {7},
  abstract = {Context: Testing a module that has memory using the black-box approach
	has been found to be expensive and relatively ineffective. Instead,
	testing without knowledge of the specifications (white-box approach)
	may not be effective in showing whether a program has been properly
	implemented as stated in its specifications. We propose instead a
	grey-box approach called Module Documentation-based Testing or MD-Test,
	the heart of which is an automatic generation of the test oracle
	from the external and internal views of the module.},
  issn = {0950-5849},
  keywords = {Specification-based testing, Grey-box testing, Automated module testing,
	Mutation-testing, COMPUTER SCIENCE, INFORMATION SYSTEMS, COMPUTER
	SCIENCE, SOFTWARE ENGINEERING},
  owner = {haalmuta},
  timestamp = {2012.04.10}
}

@BOOK{BR07,
  title = {Pragmatic Software Testing: Becoming an effective and efficient test
	professional},
  publisher = {Wiley Publishing Inc.},
  year = {2007},
  author = {Black, R.},
  address = {Indianapolis, Indiana},
  owner = {haalmuta},
  timestamp = {2012.04.10}
}

@ARTICLE{RC97,
  author = {Comerford, Richard},
  title = {Software engineering},
  journal = {IEEE Spectrum},
  year = {1997},
  volume = {34},
  pages = {65},
  number = {1},
  abstract = {Focuses on software engineering as strategy of computer industry to
	deliver products. Subversion of the software market; Reason why Java
	computer programming language accepts software engineering; How virtual
	reality has been projected as an innovative step in Web programming.
	INSET: Viewpoint: Java, the Internet will..., by Edward Yourdon..},
  issn = {00189235},
  keywords = {SOFTWARE engineering, COMPUTER industry, EQUIPMENT \& supplies},
  owner = {haalmuta},
  timestamp = {2012.04.12}
}

@book{CL04,
  author    = {Copeland, L.},
  title     = {A Practitioner's Guide to Software Test Design},
  publisher = {Artech House},
  year      = {2004},
  isbn      = {158053791X},
  owner     = {haalmuta},
  timestamp = {2012.04.10}
}

@ARTICLE{DJ72,
  author = {Dijkstra, E.},
  title = {Notes on structured programming},
  journal = {Dijkstra Notes},
  year = {1972},
  pages = {7},
  internal-note = {NOTE(review): "Dijkstra Notes" is not a real journal; this
	appears to be the circulated technical report EWD249 (Notes on
	Structured Programming, T.H. Eindhoven, 1970) -- verify entry type
	(likely @TECHREPORT) and year before citing.},
  owner = {haalmuta},
  timestamp = {2012.04.10}
}

@ELECTRONIC{DP12,
  author = {DiLascia, Paul},
  month = apr,
  year = {2012},
  title = {Unreferenced Parameters, Adding Task Bar Commands, and More},
  organization = {Microsoft MSDN Magazine},
  url = {http://msdn.microsoft.com/en-us/magazine/cc163805.aspx},
  abstract = {UNREFERENCED_PARAMETER expands to the parameter or expression passed.
	Its purpose is to avoid compiler warnings about unreferenced parameters.
	Many programmers, including yours truly, like to compile with the
	highest warning level, Level 4 (/W4). Level 4 warnings fall into
	the category of "things that can be safely ignored." Little infelicities
	that won't break your code, though they might make you look bad.},
  owner = {haalmuta},
  timestamp = {2012.04.11}
}

@book{DE99,
  author    = {Dustin, E. and Rashka, J. and Paul, J.},
  title     = {Automated Software Testing: Introduction, Management, and Performance},
  publisher = {Addison Wesley Longman, Inc.},
  address   = {Reading, Massachusetts},
  year      = {1999},
  owner     = {haalmuta},
  timestamp = {2012.04.10}
}

@ARTICLE{HE05,
  author = {Erdogmus, Hakan and Morisio, Maurizio and Torchiano, Marco},
  title = {On the Effectiveness of the Test-First Approach to Programming},
  journal = {IEEE Transactions on Software Engineering},
  year = {2005},
  volume = {31},
  pages = {226--237},
  number = {3},
  abstract = {Test-Driven Development (TDD) is based on formalizing a piece of functionality
	as a test, implementing the functionality such that the test passes,
	and iterating the process. This paper describes a controlled experiment
	for evaluating an important aspect of TDD: In TDD, programmers write
	functional tests before the corresponding implementation code. The
	experiment was conducted with undergraduate students. While the experiment
	group applied a test-first strategy, the control group applied a
	more conventional development technique, writing tests after the
	implementation. Both groups followed an incremental process, adding
	new features one at a time and regression testing them. We found
	that test-first students on average wrote more tests and, in turn,
	students who wrote more tests tended to be more productive. We also
	observed that the minimum quality increased linearly with the number
	of programmer tests, independent of the development strategy employed.
	[ABSTRACT FROM AUTHOR]},
  issn = {00985589},
  keywords = {DYNAMIC programming, COMPUTER software, TESTING, SOFTWARE engineering,
	DEBUGGING in computer science, coding tools and techniques, General
	programming techniques, productivity, programming paradigms, software
	engineering process, Software Quality/SQA, testing and debugging,
	testing strategies},
  owner = {haalmuta},
  timestamp = {2012.04.12}
}

@book{EG07,
  author    = {Everett, G. and McLeod, R.},
  title     = {Software Testing},
  publisher = {John Wiley \& Sons, Inc.},
  address   = {Hoboken, NJ},
  year      = {2007},
  owner     = {haalmuta},
  timestamp = {2012.04.09}
}

@book{GD04,
  author    = {Galin, D.},
  title     = {Software Quality Assurance},
  publisher = {Pearson Education Limited.},
  address   = {Essex, England},
  year      = {2004},
  owner     = {haalmuta},
  timestamp = {2012.04.10}
}

@ELECTRONIC{GCP12,
  author = {{Google}},
  month = apr,
  year = {2012},
  title = {{JUnit} Test Case Generation},
  url = {https://developers.google.com/java-dev-tools/codepro/doc/features/junit/test_case_generation},
  owner = {haalmuta},
  timestamp = {2012.04.12}
}

@ARTICLE{JH11,
  author = {Harty, Julian},
  title = {Finding Usability Bugs with Automated Tests},
  journal = {Communications of the ACM},
  year = {2011},
  volume = {54},
  pages = {44--49},
  number = {2},
  abstract = {The article discusses automated test methods for finding bugs in computer
	programs which negatively affect usability. Several ways in which
	automated tests can help identify problems in web-based applications
	are described. Automated tests can provide information about the
	software as it is being developed, helping to quickly identify potential
	problems. Used as an adjunct to traditional human usability testing,
	it can offer software developers fast and consistent feedback. Examples
	of automated testing methods are described.},
  issn = {00010782},
  keywords = {COMPUTER software, TESTING, COMPUTER software usability, COMPUTER
	software development, USER interfaces (Computer systems), AUTOMATION,
	HUMAN-computer interaction, APPLICATION software, DEBUGGING in computer
	science, USER-centered system design, DEVELOPMENT, WEB-based user
	interfaces, ERROR messages (Computer science)},
  owner = {haalmuta},
  timestamp = {2012.04.11}
}

@ARTICLE{HZ97,
  author = {Zhu, Hong and Hall, Patrick A. V. and May, John H. R.},
  title = {Software Unit Test Coverage and Adequacy},
  journal = {ACM Computing Surveys},
  year = {1997},
  volume = {29},
  pages = {366--427},
  number = {4},
  abstract = {Objective measurement of test quality is one of the key issues in
	software testing. It has been a major research focus for the last
	two decades. Many test criteria have been proposed and studied for
	this purpose. Various kinds of rationales have been presented in
	support of one criterion or another. We survey the research work
	in this area. The notion of adequacy criteria is examined together
	with its role in software dynamic testing. A review of criteria classification
	is followed by a summary of the methods for comparison and assessment
	of criteria. [ABSTRACT FROM AUTHOR]},
  issn = {03600300},
  keywords = {SOFTWARE engineering, RESEARCH, COMPUTER software, TESTING, RELIABILITY
	(Engineering), COMPUTER-aided software engineering, Comparing testing
	effectiveness, fault-detection, software unit test, test adequacy
	criteria, test coverage, testing methods},
  owner = {haalmuta},
  timestamp = {2012.04.10}
}

@ARTICLE{KC99,
  author = {Kaner, Cem},
  title = {Don't Use Bug Counts to Measure Testers},
  journal = {Software Testing and Quality Engineering},
  year = {1999},
  pages = {80--81},
  owner = {haalmuta},
  timestamp = {2012.04.11},
  url = {http://www.kaner.com/pdfs/bugcount.pdf}
}

@BOOK{KC93,
  title = {Testing Computer Software},
  publisher = {International Thomson Publishing},
  year = {1993},
  author = {Kaner, C.},
  isbn = {0442013612},
  owner = {haalmuta},
  timestamp = {2012.04.10}
}

@ARTICLE{VL07,
  author = {Voinea, Lucian and Telea, Alexandru},
  title = {Visual Analytics: Visual data mining and analysis of software repositories},
  journal = {Computers \& Graphics},
  year = {2007},
  volume = {31},
  pages = {410--428},
  abstract = {In this article we describe an ongoing effort to integrate information
	visualization techniques into the process of configuration management
	for software systems. Our focus is to help software engineers manage
	the evolution of large and complex software systems by offering them
	effective and efficient ways to query and assess system properties
	using visual techniques. To this end, we combine several techniques
	from different domains, as follows. First, we construct an infrastructure
	that allows generic querying and data mining of different types of
	software repositories such as CVS and Subversion. Using this infrastructure,
	we construct several models of the software source code evolution
	at different levels of detail, ranging from project and package up
	to function and code line. Second, we describe a set of views that
	allow examining the code evolution models at different levels of
	detail and from different perspectives. We detail three views: the
	file view shows changes at line lev},
  issn = {0097-8493},
  keywords = {Data mining, Software evolution, Software visualization, Software
	engineering, Maintenance},
  owner = {haalmuta},
  timestamp = {2012.04.12}
}

@BOOK{ML09,
  title = {Test-Driven Development: An Empirical Evaluation of Agile Practice},
  publisher = {Springer},
  year = {2009},
  author = {Madeyski, Lech},
  isbn = {3642042872},
  owner = {haalmuta},
  timestamp = {2012.04.11}
}

@ELECTRONIC{MD12,
  author = {{Massey}},
  month = jan,
  year = {2012},
  title = {Development},
  organization = {Massey University},
  url = {http://www.massey.ac.nz/massey/staffroom/national-shared-services/pod/my-career/development/en/development_home.cfm},
  owner = {haalmuta},
  timestamp = {2012.04.09}
}

@ELECTRONIC{MM12,
  author = {{Massey}},
  month = mar,
  year = {2012},
  title = {University Management},
  organization = {Massey University},
  url = {http://www.massey.ac.nz/massey/fms/About%20Massey/University-Management/documents/Senior_Leadership_Team_2010.pdf},
  owner = {haalmuta},
  timestamp = {2012.04.09}
}

@ELECTRONIC{MM11,
  author = {{Massey}},
  month = aug,
  year = {2011},
  title = {Mission Statement},
  organization = {Massey University},
  url = {http://www.massey.ac.nz/massey/learning/colleges/college-business/about-cob/mission-statement/mission-statement_home.cfm},
  owner = {haalmuta},
  timestamp = {2012.04.09}
}

@ELECTRONIC{MC09,
  author = {{Massey}},
  month = may,
  year = {2009},
  title = {University Charter},
  organization = {Massey University},
  url = {http://www.massey.ac.nz/massey/about-massey/university-management/charter/},
  owner = {haalmuta},
  timestamp = {2012.04.09}
}

@book{MA08,
  author    = {Mathur, A.},
  title     = {Foundations of Software Testing},
  publisher = {Dorling Kindersley},
  address   = {India},
  year      = {2008},
  owner     = {haalmuta},
  timestamp = {2012.04.10}
}

@ARTICLE{PM11,
  author = {Pan{\v{c}}ur, Matja{\v{z}} and Ciglari{\v{c}}, Mojca},
  title = {Impact of test-driven development on productivity, code and tests:
	A controlled experiment},
  journal = {Information and Software Technology},
  year = {2011},
  volume = {53},
  pages = {557--573},
  note = {Special Section: Best papers from the APSEC},
  abstract = {Context Test-driven development is an approach to software development,
	where automated tests are written before production code in highly
	iterative cycles. Test-driven development attracts attention as well
	as followers in professional environment; however empirical evidence
	of its superiority regarding its effect on productivity, code and
	tests compared to test-last development is still fairly limited.
	Moreover, it is not clear if the supposed benefits come from writing
	tests before code or maybe from high iterativity/short development
	cycles.},
  issn = {0950-5849},
  keywords = {Empirical software engineering, Controlled experiment, Test-driven
	development, Iterative test-last development},
  owner = {haalmuta},
  timestamp = {2012.04.12}
}

@ARTICLE{MM02,
  author = {M{\"u}ller, M. M. and Hagner, O.},
  title = {Experiment about test-first programming},
  journal = {IEE Proceedings -- Software},
  year = {2002},
  volume = {149},
  pages = {131--136},
  issn = {14625970},
  owner = {haalmuta},
  timestamp = {2012.04.12}
}

@BOOK{NJ04,
  title = {Test-Driven Development in Microsoft .NET},
  publisher = {Microsoft Press},
  year = {2004},
  author = {Newkirk, J. and Vorontsov, A.},
  address = {Washington, US},
  owner = {haalmuta},
  timestamp = {2012.04.10}
}

@BOOK{PR05,
  title = {Software Testing},
  publisher = {Sams},
  year = {2005},
  author = {Patton, R.},
  edition = {Second},
  isbn = {0672327988},
  owner = {haalmuta},
  timestamp = {2012.04.10}
}

@INBOOK{DP05,
  pages = {444},
  title = {Model Checking Real Time Java Using Java PathFinder.},
  year = {2005},
  author = {Peled, Doron A. and Tsay, Yih-Kuen and Lindstrom, Gary and Mehlitz,
	Peter C. and Visser, Willem},
  abstract = {The Real Time Specification for Java (RTSJ) is an augmentation of
	Java for real time applications of various degrees of hardness. The
	central features of RTSJ are real time threads; user defined schedulers;
	asynchronous events, handlers, and control transfers; a priority
	inheritance based default scheduler; non-heap memory areas such as
	immortal and scoped, and non-heap real time threads whose execution
	is not impeded by garbage collection. The Robust Software Systems
	group at NASA Ames Research Center has Java PathFinder (JPF) under
	development, a Java model checker. JPF at its core is a state exploring
	JVM which can examine alternative paths in a Java program (e.g.,
	via backtracking) by trying all nondeterministic choices, including
	thread scheduling order. This paper describes our implementation
	of an RTSJ profile (subset) in JPF, including requirements, design
	decisions, and current implementation status. Two examples are analyzed:
	jobs on a multiprogramming operating system, and },
  internal-note = {NOTE(review): @INBOOK is missing required fields
	(publisher, chapter/editor); the ISBN suggests a Springer LNCS
	volume, so this is likely an @INCOLLECTION/@INPROCEEDINGS paper
	needing a booktitle. The abstract is also truncated mid-sentence.
	Verify against the original source.},
  isbn = {9783540292098},
  owner = {haalmuta},
  timestamp = {2012.04.12}
}

@book{PM07,
  author    = {Pezze, M. and Young, M.},
  title     = {Software Testing and Analysis: Process, Principles and Techniques},
  publisher = {Wiley},
  year      = {2007},
  isbn      = {0471455938},
  owner     = {haalmuta},
  timestamp = {2012.04.10}
}

@book{PI06,
  author    = {Pinkster, I. and van de Burgt, B. and Janssen, D. and van Veenendaal,
	E.},
  title     = {Successful Test Management: An Integral Approach},
  publisher = {Springer},
  year      = {2006},
  isbn      = {3540228225},
  owner     = {haalmuta},
  timestamp = {2012.04.10}
}

@BOOK{PR00,
  title = {Software Engineering: a Practitioner's Approach},
  publisher = {McGraw-Hill},
  year = {2000},
  author = {Pressman, Roger S.},
  isbn = {0073655783},
  owner = {haalmuta},
  timestamp = {2012.04.11}
}

@standard{SM10,
  author       = {Smart, W.},
  title        = {Analysing the performance of New Zealand universities in the 2010
	Academic Ranking of World Universities},
  organization = {Tertiary Sector Performance Analysis and Reporting, Strategy and
	System Performance, Ministry of Education},
  year         = {2010},
  owner        = {haalmuta},
  timestamp    = {2012.04.09}
}

@BOOK{SV07,
  title = {Software Engineering},
  publisher = {Pearson Education Limited.},
  year = {2007},
  author = {Sommerville, I.},
  address = {Essex, England},
  edition = {Eighth},
  owner = {haalmuta},
  timestamp = {2012.04.09}
}

@ELECTRONIC{TE11,
  author = {Tempero, E.},
  month = aug,
  year = {2011},
  title = {Qualitas Corpus},
  organization = {Qualitas Research Group, University of Auckland},
  url = {http://qualitascorpus.com/},
  abstract = {The Qualitas Corpus is a curated collection of software systems intended
	to be used for empirical studies of code artefacts. The primary goal
	is to provide a resource that supports reproducible studies of software.
	The current release of the Corpus contains open-source Java software
	systems, often multiple versions.},
  keywords = {qualitas, corpus, software measurements},
  owner = {haalmuta},
  timestamp = {2012.04.12}
}

@INPROCEEDINGS{AT10,
  author = {Tempero, E. and Anslow, C. and Dietrich, J. and Han, T. and Jing
	Li and Lumpe, M. and Melton, H. and Noble, J.},
  title = {The Qualitas Corpus: A Curated Collection of Java Code for Empirical
	Studies},
  booktitle = {Software Engineering Conference (APSEC), 2010 17th Asia Pacific},
  year = {2010},
  pages = {336--345},
  month = nov,
  abstract = {In order to increase our ability to use measurement to support software
	development practise we need to do more analysis of code. However,
	empirical studies of code are expensive and their results are difficult
	to compare. We describe the Qualitas Corpus, a large curated collection
	of open source Java systems. The corpus reduces the cost of performing
	large empirical studies of code and supports comparison of measurements
	of the same artifacts. We discuss its design, organisation, and issues
	associated with its development.},
  doi = {10.1109/APSEC.2010.46},
  issn = {1530-1362},
  keywords = {Java code;Qualitas Corpus;curated collection;open source Java systems;software
	development;Java;codes;software engineering;},
  owner = {haalmuta},
  timestamp = {2012.04.11}
}

@ELECTRONIC{UNZ09,
  author = {{UniversitiesNZ}},
  month = apr,
  year = {2009},
  title = {Massey University Profile},
  organization = {Universities NZ},
  url = {http://www.universitiesnz.ac.nz/nz-university-system/massey-profile},
  owner = {haalmuta},
  timestamp = {2012.04.09}
}

@INPROCEEDINGS{WV04,
  author = {Visser, W. and Pasareanu, C. S. and Khurshid, S.},
  title = {Test Input Generation with Java PathFinder},
  booktitle = {ACM SIGSOFT Software Engineering Notes},
  year = {2004},
  volume = {29},
  series = {Software testing and analysis; ISSTA 2004: proceedings of the ACM
	SIGSOFT international symposium on software testing and analysis},
  pages = {97--107},
  owner = {haalmuta},
  timestamp = {2012.04.12}
}

@ARTICLE{SE02,
  title = {Grey Box Testing},
  journal = {Encyclopedia of Software Engineering, Volume 1},
  year = {2002},
  volume = {1},
  pages = {585},
  abstract = {The article presents a definition for the term "grey box testing,"
	which refers to a class of testing techniques that combines black
	box and white box testing techniques. The term is used more commonly
	in Europe.},
  isbn = {9780471210085},
  keywords = {COMPUTER software, SOFTWARE architecture, COMPUTER software development,
	SOFTWARE engineering, EUROPE},
  owner = {haalmuta},
  timestamp = {2012.04.10}
}

