% rename: res0000024-moors.tex

\documentclass[10pt]{sigplanconf} % preprint
% \setlength{\overfullrule}{5pt} 

\sloppy % IMPORTANT: adds necessary line breaks... great keyword!

\newcommand{\svnid}{\code{$Id: tcpoly.tex 274 2008-07-23 13:14:26Z adriaanm $}}

\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{pdfsync}


\usepackage[utf8]{inputenc}

% to get hyperref to stop complaining about bookmarks
% \makeatletter
% \providecommand*{\toclevel@title}{0}
% \providecommand*{\toclevel@author}{0}
% \makeatother

\usepackage{supertabular}

\usepackage{ifthen}
\usepackage{listings}
\usepackage{graphicx}
% \usepackage{color}
\usepackage[colorlinks]{hyperref} % generates  "Latex Error: ./tcpoly.tex:116 Missing \endcsname inserted."

% \definecolor{dullmagenta}{rgb}{0.4,0,0.4}   % #660066
% \definecolor{darkblue}{rgb}{0,0,0.4}
\hypersetup{linkcolor=blue,citecolor=blue,filecolor=blue,urlcolor=blue} % coloured links

\usepackage{courier}
\usepackage{times}
\usepackage{paralist}

\newcommand{\AWK}{{\color{red}AWK}}
\newcommand{\TODO}[1]{\mbox{{\color{red}TODO}}\{{\footnotesize{#1}}\}}


\newcommand{\code}[1]{\lstinline{#1}}
\newcommand{\class}[1]{\code{#1}}
\newcommand{\type}[1]{\code{#1}}
\newcommand{\kind}[1]{\code{#1}}
\newcommand{\method}[1]{\code{#1}}
\newcommand{\kto}[1]{\ensuremath{\rightarrow}}
\newcommand{\tmfun}[1]{\ensuremath{\rightarrow}}
\newcommand{\tpfun}[1]{\ensuremath{\Rightarrow}}
\newcommand{\nuObj}{$\nu$Obj}
\newcommand{\OmegaLang}{$\Omega$mega}
\newcommand{\CSharp}{C$^{\#}$}
\def\toplus{\hbox{$\, \buildrel {\tiny +}\over {\to}\,$}}
\def\tominus{\hbox{$\, \buildrel {\tiny -}\over {\to}\,$}}

\lstset{
  literate=
  {=>}{$\Rightarrow$}{2}
  {->}{$\to$}{2}
  {-(+)>}{$\toplus$}{2}  
  {-(-)>}{$\tominus$}{2}  
  {<-}{$\leftarrow$}{2}
  % {\\}{$\lambda$}{1}
  {<~}{$\prec$}{2}
  {<|}{$\triangleleft$}{2}
  {<:}{$<:$}{1}
}

\lstdefinelanguage{scala}{% 
       morekeywords={% 
                try, catch, throw, private, public, protected, import, package, implicit, final, package, trait, type, class, val, def, var, if, this, else, extends, with, while, new, abstract, object, requires, case, match, sealed,override},% 
         sensitive=t, % 
   morecomment=[s]{/*}{*/},morecomment=[l]{\//},% 
   escapeinside={/*\%}{*/},%
   rangeprefix= /*< ,rangesuffix= >*/,%
   morestring=[d]{"}% 
 }
 
\lstdefinelanguage{Haskell}{%
   otherkeywords={=>},%
   morekeywords={abstype,break,class,case,data,deriving,do,else,if,instance,newtype,of,return,then,where},%
   sensitive,%
   morecomment=[l]--,%
   morecomment=[n]{\{-}{-\}},%
   morestring=[b]"%
  }
  
%  numberbychapter=false,
\lstset{breaklines=true,language=scala} 
%\lstset{basicstyle=\footnotesize\ttfamily, breaklines=true, language=scala, tabsize=2, columns=fixed, mathescape=false,includerangemarker=false}
% thank you, Burak 
% (lstset tweaking stolen from
% http://lampsvn.epfl.ch/svn-repos/scala/scala/branches/typestate/docs/tstate-report/datasway.tex)
\lstset{
    xleftmargin=1em,%
    frame=none,%
    captionpos=b,%
    fontadjust=true,%
    columns=[c]fixed,%
    keepspaces=true,%
    basewidth={0.56em, 0.52em},%
    tabsize=2,%
    basicstyle=\renewcommand{\baselinestretch}{0.97}\small\tt,% \small\tt
    commentstyle=\textit,%
    keywordstyle=\bfseries,%
}

\bibliographystyle{abbrv}

% \renewcommand{\floatpagefraction}{0.90} 


 

\conferenceinfo{OOPSLA'08,} {October 19--23, 2008, Nashville, Tennessee, USA.}
\CopyrightYear{2008}
\copyrightdata{978-1-60558-215-3/08/10}

% \titlebanner{Draft \svnid}        % These are ignored unless
%\preprintfooter{}   % 'preprint' option specified.

\title{Generics of a Higher Kind}

\authorinfo{Adriaan Moors \and Frank Piessens}
           {DistriNet, K.U.Leuven}
           {\{adriaan, frank\}@cs.kuleuven.be}
\authorinfo{Martin Odersky}
           {EPFL}
           {martin.odersky@epfl.ch}

 

\input{theory}
\input{theory_override}
\usepackage{ottlayout}
%\ottstyledefaults{premiselayout=justify,numberpremises=yes,numbercolour=gray} 


% TODO: check for repeated words words ((\b\w+\b)\s*\1\b)
% TODO: capitalise dblp.bib
  
%\usepackage{verbatim} %  uncomment next 3 lines to get wordcount w/o listings&figures
% \renewenvironment{figure*}{\comment}{\endcomment}
% \renewenvironment{figure}{\comment}{\endcomment}
% \renewenvironment{lstlisting}{\comment}{\endcomment}

\begin{document}
\maketitle
% \thispagestyle{electronic} 

\begin{abstract} 
With Java 5 and \CSharp~2.0, first-order parametric polymorphism was introduced in mainstream object-oriented programming languages under the name of {\em generics}. Although the first-order variant of generics is very useful, it also imposes some restrictions: it is possible to abstract over a type, but the resulting type constructor cannot be abstracted over. This can lead to code duplication. We removed this restriction in Scala, by allowing type constructors as type parameters and abstract type members. This paper presents the design and implementation of the resulting type constructor polymorphism. Furthermore, we study how this feature interacts with existing object-oriented constructs, and show how it makes the language more expressive.
\end{abstract}

\category{D.3.3}{Programming Languages}{Language Constructs and Features}[Polymorphism]

\terms
Design, Experimentation, Languages

\keywords
type constructor polymorphism, higher-kinded types, higher-order genericity, Scala


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
\section{Introduction} 
First-order parametric polymorphism is now a standard feature of statically typed programming languages. Starting with System F \cite{girard:thesis,DBLP:conf/programm/Reynolds74} and functional programming languages, the constructs have found their way into object-oriented languages such as Java, \CSharp, and many more. In these languages, first-order parametric polymorphism is usually called {\em generics}. Generics rest on sound theoretical foundations, which were established by Abadi and Cardelli~\cite{DBLP:journals/iandc/AbadiC96,DBLP:journals/scp/AbadiC95}, Igarashi et al. \cite{DBLP:journals/toplas/IgarashiPW01}, and many others; they are well-understood by now.

One standard application area of generics is collections. For instance, the type \lstinline@List[A]@ represents lists of a given element type \lstinline@A@, which can be chosen freely. In fact, generics can be seen as a generalisation of the type of arrays, which has always been parametric in the type of its elements.

First-order parametric polymorphism has some limitations, however. Although it allows abstracting over types, which yields {\em type constructors} such as \lstinline@List@, these type constructors cannot be abstracted over. For instance, one cannot pass a type constructor as a type argument to another type constructor. Abstractions that require this are quite common, even in object-oriented programming, and this restriction thus leads to unnecessary duplication of code. We provide several examples of such abstractions in this paper.

The generalisation of first-order polymorphism to a higher-order system was a natural step in lambda calculus \cite{girard:thesis,DBLP:conf/programm/Reynolds74,DBLP:journals/iandc/BruceMM90}. This theoretical advance has since been incorporated into functional programming languages. For instance, the Haskell programming language \cite{DBLP:journals/sigplan/HudakPWBFFGHHJKNPP92} supports type constructor polymorphism, which is also integrated with its type class concept \cite{DBLP:journals/jfp/Jones95}. This generalisation to types that abstract over types that abstract over types (``higher-kinded types'') has many practical applications. For example, comprehensions \cite{DBLP:journals/mscs/Wadler92}, parser combinators \cite{Hutton96:monpars,LeijenMeijer:parsec}, as well as more recent work on embedded Domain Specific Languages (DSL's) \cite{DBLP:conf/aplas/CaretteKS07,DBLP:conf/gpce/HoferORM07} critically rely on higher-kinded types.

The same needs -- as well as more specific ones -- arise in object-oriented programming. LINQ brought direct support for comprehensions to the
.NET platform \cite{DBLP:conf/ecoop/BiermanMS05,DBLP:conf/oopsla/Meijer07}, Scala \cite{LAMP-REPORT-2006-001} has had a similar feature from the start, and Java 5
introduced a lightweight variation \cite[Sec. 14.14.2]{gosling05:jls}. Parser combinators are also gaining momentum: Bracha uses them as the underlying technology for his
Executable Grammars \cite{1314923}, and Scala's distribution includes a library \cite{moors07:sparsec} that implements an embedded DSL for
parsing, which allows users to express parsers directly in Scala, in a notation that closely resembles EBNF. Type constructor polymorphism is
crucial in defining a common parser interface that is implemented by different back-ends. 

In this paper, we focus on our experience with extending Scala with type constructor polymorphism, and on the resulting gain in expressivity of the language as a whole. A similar extension could be added to, for example, Java in the same way \cite{Altherr07:fgjomega}. Our extension was incorporated in Scala 2.5, which was  released in May 2007. 


The main contributions of this paper are as follows:
\begin{itemize}
  \item We illustrate the utility and practicality of type constructor polymorphism using a realistic example.
  \item We develop a kind system that captures both lower and upper bounds, and variances of types.        
  \item We survey how the integration with existing features of Scala (such as subtyping, definition-site variance annotations, and implicit arguments) makes the language more powerful.
  \item We relate our experience with implementing the kind system in the open-source Scala compiler.  
\end{itemize}

For the reader who is not yet familiar with Scala, the next section provides a brief introduction.
The rest of this paper is divided into three parts, each of which considers a different facet of the evaluation of type constructor polymorphism.
First, Section \ref{sec:dup} demonstrates that our extension reduces boilerplate that arises from the use of genericity. We establish intuitions
with a simple example, and extend it to a realistic implementation of the comprehensions fragment of \type{Iterable}.

Second, we present the type and kind system. Section \ref{sec:informalformal} discusses the surface syntax in full Scala, and the underlying model of kinds. Based on the ideas established in the theoretical part, Section \ref{sec:boundediter} refines \type{Iterable}, so that it accommodates collections that impose bounds on the type of their elements.

Third, we have validated the practicality of our design by implementing our extension in the Scala compiler, and we report on our experience in Section \ref{sec:fullscala}. Throughout the paper, we discuss various interactions of type constructor polymorphism with existing features in Scala. Section \ref{sec:implicits} focusses on the integration with Scala's implicits, which are used to encode Haskell's type classes. Our extension lifts this encoding to type \emph{constructor} classes. Furthermore, due to subtyping, Scala supports abstracting over type class contexts, so that the concept of a bounded monad can be expressed cleanly, which is not possible in (mainstream extensions of) Haskell.

Finally, we summarise related work in Section \ref{sec:related} and conclude in Section \ref{sec:conclusion}.

% % conclusion


\section{Prelude: Scala Basics \label{sec:tutorial}}
This section introduces the basic subset of Scala~\cite{LAMP-REPORT-2006-001,odersky08:scalabook} that is used in the examples of this paper. We assume familiarity with a Java-like language, and focus on what makes Scala different.


\subsection{Outline of the syntax}
A Scala program is roughly structured as a tree of nested definitions. A definition starts with a keyword, followed by its name, a classifier, and the entity to which the given name is bound, if it is a concrete definition. If the root of the tree is the compilation unit, the next level consists of objects (introduced by the keyword \code{object}) and classes (\mbox{\code{class},} \code{trait}), which in turn contain members. A member may again be a class or an object, a constant value member \mbox{(\code{val})}, a mutable value member (\code{var}), a method (\code{def}), or a type member (\code{type}). Note that a type annotation always follows the name (or, more generally, the expression) that it classifies.

On the one hand, Scala's syntax is very regular, with the \emph{keyword/name/classifier/bound entity}-sequence being its lead motif. Another important aspect of this regularity is nesting, which is virtually unconstrained. On the other hand, syntactic sugar enables flexibility and succinctness. For example, \code{buffer += 10} is shorthand for the method call \code{buffer.+=(10)}, where \code{+=} is a user-definable identifier.


\subsection{Functions}
Since Scala is a functional language, functions are first-class values. Thus, like an integer, a function can be written down directly: \code{x: Int => x + 1} is the successor function on integers. Furthermore, a function can be passed as an argument to a (higher-order) function or method. Functions and methods are treated similarly in Scala; the main difference is that a method is called on a target object.

The following definition introduces a function \code{len} that takes a \type{String} and yields an \type{Int} by calling \type{String}'s \code{length} method on its argument \code{s}: 

\begin{lstlisting}[frame=single]
  val len: String => Int = s => s.length
\end{lstlisting}

In the classifier of the definition, the type \type{String => Int}, the arrow \type{=>} is a type constructor, whereas it introduces an anonymous function on the right-hand side (where a value is expected). This anonymous function takes an argument \code{s} of type \type{String} and returns \code{s.length}. Thus, the application \code{len("four")} yields \code{4}. 

Note that the Scala compiler infers \cite{DBLP:conf/popl/OderskyZZ01} the type of the argument \code{s}, based on the expected type of the value \code{len}. The direction of type inference can also be reversed:

\begin{lstlisting}[frame=single]
  val len = (s: String) => s.length
\end{lstlisting}

The right-hand side's anonymous function can be abbreviated using syntactic sugar that implicitly introduces functional abstraction. This can be thought of as turning \mbox{\type{String}'s} \code{length} method into a function:

\begin{lstlisting}[frame=single]
  val len: String => Int = _.length
\end{lstlisting}

Finally, since Scala is purely object-oriented at its core, a function is represented internally as an object with an \code{apply} method that is derived straightforwardly from the function. Thus, one more equivalent definition of \code{len}:

\begin{lstlisting}[frame=single]
  object len {
    def apply(s: String): Int = s.length
  }
\end{lstlisting}



\subsection{Classes, traits, and objects}
In Scala, a class can inherit from another class and one or more traits. A trait is a class that can be composed with other traits using mixin composition. Mixin composition is a restricted form of multiple inheritance, which avoids ambiguities by linearising the graph that results from composing classes that are themselves composites. The details are not relevant for this paper, and we simply refer to both classes and traits as ``classes''.

The feature that is relevant to this paper is that classes may contain \emph{type} members. An abstract type member is similar to a type parameter. The main difference between parameters and members is their scope and visibility. A type parameter is syntactically part of the type that it parameterises, whereas a type member -- like value members -- is encapsulated, and must be selected explicitly. Similarly, type members are inherited, while type parameters are local to their class. The complementary strengths of type parameters and abstract type members are a key ingredient of Scala's recipe for scalable component abstractions \cite{DBLP:conf/oopsla/OderskyZ05}.

Type parameters are made concrete using type application. Thus, given the definition \code{class List[T]}, \type{List} is a type constructor (or type function), and \type{List[Int]} is the application of this function to the argument \type{Int}. Abstract type members are made concrete using \emph{abstract type member refinement}, a special form of mixin composition. Note that \type{List} is now an abstract class\footnote{For brevity, we use the \code{trait} keyword instead of \code{abstract class}.}, since it has an abstract member \type{T}:

\begin{lstlisting}[frame=single]
trait List {
  type T
}
\end{lstlisting}

This abstract member is made concrete as follows:

\begin{lstlisting}[frame=single]
List{type T=Int}
\end{lstlisting}

Note that, with our extension, type members may also be parameterised, as in \type{type Container[X]}. 

Methods typically define one or more lists of value parameters, in addition to a list of type parameters. Thus, a method can be seen as a value that abstracts over values and types. For example, \code{def iterate[T](a: T)(next: T => T, done: T => Boolean): List[T]} introduces a method with one type parameter \type{T}, and two argument lists. Methods with multiple argument lists may be partially applied. For example, for some object \code{x} on which \code{iterate} is defined, \code{x.iterate(0)} corresponds to a higher-order function with type \type{(Int => Int, Int => Boolean) => List[Int]}. Note that the type parameter \type{T} was inferred to be \type{Int} from the type of \code{a}. 

Finally, an \code{object} introduces a class with a singleton instance, which can be referred to using the object's name.

% \TODO{more explanation needed? other topics to cover?}




% 
% Scala provides the programmer with abstraction mechanisms that scale from abstracting over a single expression, up to the composition of components that specify complex behaviour and their dependencies on other components \cite{DBLP:conf/oopsla/OderskyZ05}. Functional abstraction excels in the small, whereas the recursive structure of components is better handled by object-oriented techniques: traits express provided and required members (at the type and value level), and mixin composition is used to form a composition that meets the constraints of its constituents. In the context of this paper, a trait may be thought of as an abstract class that supports mixin composition; we gloss over the subtle differences between a class and a trait\footnote{\code{super} is treated differently in a trait, constructors are not allowed, and their compilation is more involved than for classes due to platform restrictions.}.
% 
% The functional style of abstraction uses parameterisation: a function abstracts over the concrete values of its arguments using parameters. Similarly, a ``generic'' class abstracts over one or more types using parametric polymorphism. Thus, abstraction is introduced using parameters, and function application makes these abstractions concrete. Since Scala is a functional programming language, a function is a value, which in turn is represented internally by an object, as Scala's core is purely object-oriented. 
% 
% Abstract members and mixin composition are the key ingredients of the object-oriented style of abstraction. A trait uses abstract members to abstract over values and types. This abstraction is made concrete by composing the original trait with another one that declares suitable concrete versions of the abstract members.
% 
% One of Scala's design goals is to offer the complementary strengths of functional and object-oriented abstraction, while tightly integrating the underlying language constructs. Type constructor polymorphism provides a kind of abstraction that was previously lacking: while values that abstract over values (i.e., functions) are considered first-class, as well as types that abstract over types in the object-oriented style (i.e., classes with abstract type members), the functional style of type-abstraction does not yield first-class types: type constructors are second-rate citizens.
% 
% % \emph{higher-order functions} that abstract over functions. The natural conclusion at the type level would be that a generic class is a first-class type, i.e., it can be passed as an argument to another generic class. This generalisation of parametric polymorphism, which we call ``type constructor polymorphism'', is discussed in the following sections.


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Reducing Code Duplication with Type Constructor Polymorphism \label{sec:dup}}
This section illustrates the benefits of generalising genericity to type constructor polymorphism using the well-known \type{Iterable} abstraction. The first example, which is due to Lex Spoon, illustrates the essence of the problem in the small. Section \ref{sec:scala:iterable} extends it to more realistic proportions.



% \begin{lstlisting}[float,caption=Limitations of Genericity,label=lst:iter:gen]
% trait Iterable[T] {
%   def filter(p: T => Boolean): Iterable[T]
%   def remove(p: T => Boolean): Iterable[T] = filter (x => !p(x))
% }
% 
% trait List[T] extends Iterable[T] {
%   def filter(p: T => Boolean): List[T] 
%   override def remove(p: T => Boolean): List[T] 
%     = filter (x => !p(x))
% } 
% \end{lstlisting}

% DONE: use same counter/name as listings  
\begin{lstlisting}[float=*,mathescape=true,caption=Limitations of Genericity,label=lst:iter:gen]
$\includegraphics[width=0.80\textwidth]{IterableRedundant}$
\end{lstlisting}


Listing \ref{lst:iter:gen} shows a Scala implementation of the trait \type{Iterable[T]}. It contains an abstract method \code{filter} and a convenience method \code{remove}. Subclasses should implement \code{filter} so that it creates a new collection by retaining  only the elements of the current collection that satisfy the predicate \code{p}. This predicate is modelled as a function that takes an element of the collection, which has type \type{T}, and returns a \type{Boolean}. As \code{remove} simply inverts the meaning of the predicate, it is implemented in terms of \code{filter}.

Naturally, when filtering a list, one expects to again receive a list. Thus, \type{List} overrides \code{filter} to refine its result type covariantly. For brevity, \type{List}'s subclasses, which implement this method, are omitted. For consistency, \code{remove} should have the same result type, but the only way to achieve this is by overriding it as well. The resulting code duplication is a clear indicator of a limitation of the type system: both methods in \type{List} are redundant, but the type system is not powerful enough to express them at the required level of abstraction in \type{Iterable}.


% \begin{lstlisting}[float,caption=Removing Code Duplication,label=lst:iter:tcpoly]
% trait Iterable[T, Container[X]] {
%   def filter(p: T => Boolean): Container[T]
%   def remove(p: T => Boolean): Container[T] = filter (x => !p(x))
% }
% 
% trait List[T] extends Iterable[T, List]
% \end{lstlisting}

\begin{lstlisting}[float=*,mathescape=true,caption=Removing Code Duplication,label=lst:iter:tcpoly]
$\includegraphics[width=0.85\textwidth]{IterableTcpoly}$
\end{lstlisting}

Our solution, depicted in Listing \ref{lst:iter:tcpoly}, is to abstract over the type constructor that represents the container of the result of \code{filter} and \code{remove}. The improved \type{Iterable} now takes two type parameters: the first one, \type{T}, stands for the type of its elements, and the second one, \type{Container}, represents the \emph{type constructor} that determines part of the result type of the \code{filter} and \code{remove} methods. More specifically, \type{Container} is a type parameter that itself takes one type parameter. Although the name of this higher-order type parameter (\code{X}) is not needed here, more sophisticated examples will show the benefit of explicitly naming\footnote{In full Scala `\type{_}' may be used as a wild-card name for higher-order type parameters.} higher-order type parameters.
 
Now, to denote that applying \code{filter} or \code{remove} to a \type{List[T]} returns a \type{List[T]}, \type{List} simply instantiates \type{Iterable}'s type parameter to the \type{List} type constructor.

In this simple example, one could also use a construct like Bruce's \type{MyType} \cite{DBLP:conf/ecoop/BruceSG95}. However, this scheme breaks down in more complex cases, as  demonstrated in the next section. 




%\TODO{minimise differences in versions of Iterable -- say something about this}

\subsection{Improving Iterable \label{sec:scala:iterable}}
In this section we design and implement the abstraction that underlies comprehensions \cite{DBLP:journals/mscs/Wadler92}. Type constructor polymorphism plays an essential role in expressing the design constraints, as well as in factoring out boilerplate code without losing type safety. More specifically, we discuss the signature and implementation of \type{Iterable}'s \code{map}, \mbox{\code{filter},} and \code{flatMap} methods. The LINQ project brought these to the .NET platform as \code{Select}, \code{Where}, and \code{SelectMany} \cite{DBLP:conf/oopsla/Meijer06}. 

Comprehensions provide a simple mechanism for dealing with collections by transforming their elements (\code{map}, \code{Select}), retrieving a sub-collection (\code{filter}, \code{Where}), and collecting the elements from a collection of collections in a single collection (\code{flatMap}, \code{SelectMany}). 


To achieve this, each of these methods interprets a user-supplied function in a different way in order to derive a new collection from the elements of an existing one: \code{map} transforms the elements as specified by that function, \code{filter} interprets the function as a predicate and retains only the elements that satisfy it, and \code{flatMap} uses the given function to produce a collection of elements for every element in the original collection, and then collects the elements in these collections in the resulting collection.

\begin{lstlisting}[float,caption=\type{Builder} and \type{Iterator},label=lst:builder]
trait Builder[Container[X], T] {
  def +=(el: T): Unit
  def finalise(): Container[T]
}

trait Iterator[T] {
  def next(): T
  def hasNext: Boolean
  
  def foreach(op: T => Unit): Unit 
    = while(hasNext) op(next())
}
\end{lstlisting}

The only collection-specific operations that are required by a method such as \code{map}, are iterating over a collection, and producing a new one. Thus, if these operations can be abstracted over, these methods can be implemented in \type{Iterable} in terms of these abstractions. Listing \ref{lst:builder} shows the well-known, lightweight, \type{Iterator} abstraction that encapsulates iterating over a collection, as well as the \type{Builder} abstraction, which captures how to produce a collection, and thus may be thought of as the dual of \type{Iterator}.

\type{Builder} crucially relies on type constructor polymorphism, as it must abstract over the type constructor that represents the collection that it builds. The \code{+=} method is used to supply the elements in the order in which they should appear in the collection. The collection itself is returned by \code{finalise}. For example, the \code{finalise} method of a \type{Builder[List, Int]} returns a \type{List[Int]}.

Listing \ref{lst:iterable} shows a minimal \type{Buildable} with an abstract \code{build} method, and a convenience method, \code{buildWith}, that captures the typical use-case for \code{build}. 

By analogy to the proven design that keeps \type{Iterator} and \type{Iterable} separated, \type{Builder} and \type{Buildable} are modelled as separate abstractions as well. In a full implementation, \type{Buildable} would contain several more methods, such as \code{unfold} (the dual of \code{fold} \cite{DBLP:conf/icfp/GibbonsJ98}), which should not clutter the lightweight \type{Builder} interface.

Note that \type{Iterable} uses a type constructor member, \type{Container}, to abstract over the precise type of the container, whereas \type{Buildable} uses a parameter. Since clients of \type{Iterable} generally are not concerned with the exact type of the container (except for the regularity that is imposed by our design), it is neatly encapsulated as a type member. \type{Buildable}'s primary purpose is exactly to create and populate a specific kind of container. Thus, the type of an instance of the \type{Buildable} class should specify the type of container that it builds. This information is still available with a type member, but it is less manifest.

The \code{map}/\code{filter}/\code{flatMap} methods are implemented in terms of the even more flexible trio \code{mapTo}/\code{filterTo}/\code{flatMapTo}. The generalisation consists of decoupling the original collection from the produced one -- they need not be the same, as long as there is a way of building the target collection. Thus, these methods take an extra argument of type \type{Buildable[C]}. Section \ref{sec:implicits} shows how an orthogonal feature of Scala can be used to relieve callers from supplying this argument explicitly.

For simplicity, the \code{mapTo} method is implemented as straightforwardly as possible. The \code{filterTo} method shows how the \code{buildWith} convenience method can be used.

The result types of \code{map}, \code{flatMap}, and their generalisations illustrate why a \type{MyType}-based solution would not work: whereas the type of \code{this} would be \type{C[T]}, the result type of these methods is \type{C[U]}: it is the same type \emph{constructor}, but it is applied to different type \emph{arguments}!

 
%  def unfold[T, U](seed: U, fun: U => Option[(T, U)]): Container[T] =  
%    def unfoldRec(seed: U)(implicit buff: Builder[Container, T]): Unit 
%      = fun(seed) map {(el, newSeed) => 
%          buff += el
%          unfoldRec(newSeed) // not tail-recursive unless map is inlined
%        }  
%    buildWith(unfoldRec(seed))
%  }  

%  def unfold[T](seed: T, fun: T => Option[T]): Container[T] =  {
%    def unfoldRec(seed: T, fun: T => Option[T])(buff: Builder[Container, T]): Unit = {
%       buff += seed
%       fun(seed) map (unfoldRec(_, fun)(buff))
%    }      
%    buildWith(unfoldRec(seed, fun))
%  }  

\begin{lstlisting}[float,label=lst:iterable,caption=\type{Buildable} and \type{Iterable}]
trait Buildable[Container[X]] {
  def build[T]: Builder[Container, T]
  
  def buildWith[T](f: Builder[Container,T]=> 
                       Unit): Container[T] ={
    val buff = build[T]
    f(buff)
    buff.finalise()
  }
}

trait Iterable[T] {
  type Container[X] <: Iterable[X]
  
  def elements: Iterator[T]

  def mapTo[U, C[X]](f: T => U)
           (b: Buildable[C]): C[U] = { 
    val buff = b.build[U]
    val elems = elements
    
    while(elems.hasNext){
      buff += f(elems.next)
    }  
    buff.finalise()
  }    
  def filterTo[C[X]](p: T => Boolean)
              (b: Buildable[C]): C[T] = { 
    val elems = elements
    
    b.buildWith[T]{ buff =>
      while(elems.hasNext){
        val el = elems.next
        if(p(el)) buff += el
      }
    }
  }
  def flatMapTo[U,C[X]](f: T=>Iterable[U])
               (b: Buildable[C]): C[U] = { 
    val buff = b.build[U]
    val elems = elements
    
    while(elems.hasNext){
      f(elems.next).elements.foreach{ el =>
        buff += el 
      }
    }
    buff.finalise()
  }
  
  def map[U](f: T => U)
      (b: Buildable[Container]): Container[U] 
    = mapTo[U, Container](f)(b)
  def filter(p: T => Boolean)
      (b: Buildable[Container]): Container[T] 
    = filterTo[Container](p)(b)
  def flatMap[U](f: T => Container[U])
      (b: Buildable[Container]): Container[U] 
    = flatMapTo[U, Container](f)(b)
}
\end{lstlisting}



\begin{lstlisting}[float,label=lst:buildable1,caption=Building a \type{List}]
object ListBuildable extends Buildable[List]{
  def build[T]: Builder[List, T] = new ListBuffer[T] with Builder[List, T] {
    // += is inherited from ListBuffer (Scala standard library)
    def finalise(): List[T] = toList
  }
}
\end{lstlisting}

\begin{lstlisting}[float,label=lst:buildable2,caption=Building an \type{Option}]
object OptionBuildable extends 
                       Buildable[Option] {
def build[T]: Builder[Option, T] 
  = new Builder[Option, T] {
      var res: Option[T] = None
    
      def +=(el: T) 
        = if(res.isEmpty) res = Some(el) 
          else throw new UnsupportedOperationException(">1 elements")
              
      def finalise(): Option[T] = res
    }
} 
\end{lstlisting}

%\footnote{An \code{object} definition is like an anonymous class definition with exactly one instance, which can be referred to using the object's name.}
Listings \ref{lst:buildable1} and \ref{lst:buildable2} show the objects that implement the \lstinline|Buildable| interface for \lstinline!List! and \lstinline!Option!. An \type{Option} corresponds to a list that contains either 0 or 1 elements, and is commonly used in Scala to avoid \code{null} values.

\begin{lstlisting}[float,label=lst:list:iterable,caption=\type{List} subclasses \type{Iterable}]
class List[T] extends Iterable[T]{
  type Container[X] = List[X]
  
  def elements: Iterator[T] 
    = new Iterator[T] { 
        // standard implementation
    }
}
\end{lstlisting}

With all this in place, \type{List} can easily be implemented as a subclass of \type{Iterable}, as shown in Listing \ref{lst:list:iterable}. The type constructor of the container is fixed to be \type{List} itself, and the standard \type{Iterator} trait is implemented. This implementation does not offer any new insights, so we have omitted it.


  
\subsection{Example: using Iterable} 
This example demonstrates how to use \code{map} and \code{flatMap} to compute the average age of the users of, say, a social networking site.
Since users do not have to enter their birthday, the input is a \type{List[Option[Date]]}. An \type{Option[Date]} either holds a date or nothing. Listing \ref{lst:iterex} shows how to proceed.

First, a small helper is introduced that computes the current age in years from a date of birth. To collect the known ages, an optional date is transformed into an optional age using \code{map}. Then, the results are collected into a list using \code{flatMapTo}.
Note the use of the more general \mbox{\code{flatMapTo}.} With \code{flatMap}, the inner \code{map} would have had to convert its result from an \type{Option} to a \type{List}, as \code{flatMap(f)} returns its results in the same kind of container as produced by the function \code{f} (the inner \code{map}).
Finally, the results are aggregated using \code{reduceLeft} (not shown here). The full code of the example is available on the paper's homepage\footnote{\url{http://www.cs.kuleuven.be/~adriaan/?q=genericshk}}. 

Note that the Scala compiler infers most proper types (we added some annotations to aid understanding), but it does not infer type constructor arguments. Thus, type argument lists that contain type constructors must be supplied manually.

\begin{lstlisting}[float,label=lst:iterex,caption=Example: using \type{Iterable}]
val bdays: List[Option[Date]] = List(
  Some(new Date("1981/08/07")), None, 
  Some(new Date("1990/04/10")))
def toYrs(bd: Date): Int = // omitted

val ages: List[Int] 
 = bdays.flatMapTo[Int, List]{ optBd => 
    optBd.map{d => toYrs(d)}(OptionBuildable)
   }(ListBuildable)

val avgAge = ages.reduceLeft[Int](_ + _) / 
               ages.length
\end{lstlisting}

Finally, the only type constructor that arises in the example is the \type{List} type argument, as type constructor inference has not been implemented yet. This demonstrates that the complexity of type constructor polymorphism, much like with genericity, is concentrated in the internals of the library. The upside is that library designers and implementers have more control over the interfaces of the library, while clients remain blissfully ignorant of the underlying complexity. (As noted earlier, Section \ref{sec:implicits} will show how the arguments of type \type{Buildable[C]} can be omitted.)

\subsection{Members versus parameters}
The relative merits of abstract members and parameters have been discussed in detail by many others \cite{DBLP:conf/ecoop/BruceOW98,DBLP:conf/ecoop/ThorupT99,DBLP:conf/ecoop/Ernst01}. The Scala philosophy is to embrace both: sometimes parameterisation is the right tool, and at other times, abstract members provide a better solution. Technically, it has been shown how to safely encode parameters as members \cite{moors08:scalina}, which -- surprisingly -- wasn't possible in earlier calculi \cite{DBLP:conf/ecoop/OderskyCRZ03}.

Our examples have used both styles of abstraction. \type{Buildable}'s main purpose is to build a certain container. Thus, \type{Container} is a type parameter: a characteristic that is manifest to external clients of \type{Buildable}, as it is (syntactically) part of the type of its values. In \type{Iterable} a type member is used, as its external clients are generally only interested in the type of its elements. Syntactically, type members are less visible, as \type{Iterable[T]} is a valid proper type. To make the type member explicit, one may write \lstinline|Iterable[T]{type Container[X]=List[X]}|. Alternatively, the \type{Container} type member can be selected on a singleton type that is a subtype of \type{Iterable[T]}.


% \TODO{discuss \ref{lst:iter:mem}}
% \begin{lstlisting}[float,caption=\type{Iterable} with an abstract type constructor member,label=lst:iter:mem]
% trait Iterable[T] {
%   type Container[X]
%   
%   def filter(p: T => Boolean): Container[T]
% }
% \end{lstlisting}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%#{latexmk}input.file formal.tex 
\input{formal}

% {latexmk}input.file formal2.tex 
%\input{formal2}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Bounded Iterable \label{sec:boundediter}}
As motivated in Section \ref{sec:example:kinds:track:bounds}, in order for \type{Iterable}  to model collections that impose an (F-)bound on the type of their elements, it must accommodate this bound from the start. 

To allow subclasses of \type{Iterable} to declare an (F-)bound on the type of their elements, \type{Iterable} must abstract over this bound. Listing \ref{lst:boundediter} generalises the interface of the original \type{Iterable} from Listing \ref{lst:iterable}. The implementation is not affected by this change. 

\begin{lstlisting}[caption=Essential changes to extend Iterable with support for (F-)bounds,label=lst:boundediter,float]
trait Builder[Container[X <: B[X]], T <: B[T], B[Y]]
trait Buildable[Container[X <: B[X]], B[Y]] {
  def build[T <: B[T]]: Builder[Container,T,B]
}
trait Iterable[T <: Bound[T], Bound[X]] {
  type Container[X <: Bound[X]] <: Iterable[X, Bound]

  def map[U <: Bound[U]](f: T => U)
    (b: Buildable[Container, Bound]): Container[U] = ...
}
\end{lstlisting}



Listing \ref{lst:boundedsubs} illustrates various kinds of subclasses, including \type{List}, which does not impose a bound on the type of its elements, and thus uses \type{Any} as its bound (\type{Any} and \type{Nothing} are kind-overloaded). Note that \type{NumericList} can also be derived by encoding the anonymous type function \type{X -> Number} as \type{Wrap1[Number]#Apply}.

\begin{lstlisting}[caption=(Bounded) subclasses of Iterable,label=lst:boundedsubs,float]
class List[T] extends Iterable[T, Any] {
  type Container[X] = List[X]
}

trait OrderedCollection[T <: Ordered[T]] extends Iterable[T, Ordered] {
  type Container[X <: Ordered[X]] <: OrderedCollection[X]
}

trait Wrap1[T]{type Apply[X]=T}

trait Number
class NumericList[T <: Number] extends Iterable[T, Wrap1[Number]#Apply] {
  type Container[X <: Number] = NumericList[X]
}
\end{lstlisting}

Again, the client of the collections API is not exposed to the relative complexity of Listing \ref{lst:boundediter}.  However, without it, a significant fraction of the collection classes could not be unified under the same \type{Iterable} abstraction. Thus, the clients of the library benefit: a unified interface for collections, whether they constrain the type of their elements or not, means that they need to learn fewer concepts. 
 
Alternatively, it would be interesting to introduce kind-level abstraction to solve this problem. Tentatively, \type{Iterable}
and \type{List} could then be expressed as: 

\begin{lstlisting}[frame=no]
trait Iterable[T : ElemK, ElemK : Kind]
class List[T] extends Iterable[T, *]  
\end{lstlisting}

This approach is more expressive than simply abstracting over the upper bound on the element type, as the interval kind can express lower and upper bounds simultaneously. This would become even more appealing in a language that allows the user to define new kinds \cite{DBLP:journals/entcs/Sheard07}. 


% \subsection{String as bounded Iterable}
% \TODO{}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Full Scala \label{sec:fullscala}}
In this section we discuss our experience with extending the full Scala compiler with type constructor polymorphism. As discussed below, the impact\footnote{The initial patch to the compiler can be viewed at \url{http://lampsvn.epfl.ch/trac/scala/changeset/10642}} of our extension is mostly restricted to the type checker. Finally, we list the limitations of our implementation, and discuss the interaction with variance. The implementation supports variance annotations on higher-order type parameters, but this has not been integrated in the formalisation yet.

\subsection{Implementation}
Extending the Scala compiler with support for type constructor polymorphism came down to introducing another level of indirection in the well-formedness checks for types.  % since the main data structures were already in place performance was only slightly affected, and only when type constructor polymorphism is actually used

Once abstract types could be parameterised (a simple extension to the parser and the abstract syntax trees), the check that type parameters must always be proper types had to be relaxed. Instead, a more sophisticated mechanism tracks the kinds that are inferred for these abstract types. Type application then checks two things: the type that is used as a type constructor must indeed have a function kind, and the kinds of the supplied arguments must conform to the expected kinds. Additionally, one must ensure that type constructors do not occur as the type of a value. 

Since Scala uses type erasure in the back-end, the extent of the changes is limited to the type checker. Clearly, our extension thus does not have any impact on the run-time characteristics of a program. Ironically, as type erasure is at the root of other limitations in Scala, it was an important benefit in implementing type constructor polymorphism.  

Similar extensions in languages that target the .NET platform face a tougher challenge, as the virtual machine has a richer notion of types and thus enforces stricter invariants. Unfortunately, the model of types does not include higher-kinded types. Thus, to ensure full interoperability with genericity in other languages on this platform, compilers for languages with type constructor polymorphism must resort to partial erasure, as well as code specialisation in order to construct the necessary representations of types that result from abstract type constructors being applied to arguments.

\subsubsection{Limitations}
Syntactically, there are a few limitations that we would like to lift in upcoming versions. As it stands, we do not directly support partial type application and currying, or anonymous type functions. However, these features can be encoded, as illustrated in Section \ref{sec:boundediter}.

We have not yet extended the type inferencer to infer higher-kinded types. In all likelihood, type constructor inference will have to be limited to a small subset in order to ensure decidability. 


\subsection{Variance \label{sec:variance}}

Another facet of the interaction between subtyping and type constructors is seen in Scala's support for definition-site variance annotations \cite{DBLP:conf/ecoop/EmirKRY06}. Variance annotations provide the information required to decide subtyping of types that result from applying the same type constructor to different types.

As the classical example, consider the definition of the class of immutable lists, \lstinline!class List[+T]!. The \lstinline!+! before \lstinline!List!'s type parameter denotes that \lstinline!List[T]! is a subtype of \lstinline!List[U]! if \lstinline!T! is a subtype of \lstinline!U!. We say that \lstinline!+! introduces a covariant type parameter, \lstinline!-! denotes contravariance (the subtyping relation between the type arguments is the inverse of the resulting relation between the constructed types), and the lack of an annotation means that these type arguments must be identical.

Variance annotations pose the same kind of challenge to the model of kinds as did bounded type parameters: kinds must encompass them as they represent information that should not be glossed over when passing around type constructors. The same strategy as for including bounds into \lstinline!*! can be applied here, except that variance is a property of type \emph{constructors}, so it should be tracked in \lstinline!->!, by distinguishing $\toplus$ and $\tominus$ \cite{SteffenThesis}.

Without going into too much detail, we illustrate the need for variance annotations on higher-order type parameters and how they influence kind conformance. 

Listing \ref{lst:variance} defines a perfectly valid \type{Seq} abstraction, albeit with a contrived \code{lift} method. Because \type{Seq} declares \type{C}'s type parameter \type{X} to be covariant, it may use its covariant type parameter \type{A} as an argument for \type{C}, so that \type{C[A] <: C[B]} when \type{A <: B}. 

\type{Seq} declares the type of its \code{this} variable to be \type{C[A]} (\code{self: C[A] =>} declares \code{self} as an alias for \code{this}, and gives it an explicit type). Thus, the \code{lift} method may return \code{this}, as its type can be subsumed to \type{C[B]}.

Suppose that a type constructor that is invariant in its first type parameter could be passed as the argument for a type constructor parameter that assumes its first type parameter to be covariant. This would foil the type system's first-order variance checks: \type{Seq}'s definition would be invalid if \type{C} were invariant in its first type parameter.

The remainder of Listing \ref{lst:variance} sets up a concrete example that would result in a run-time error if the type application \type{Seq[A, Cell]} were not ruled out statically.

More generally, a type constructor parameter that does not declare any variance for its parameters does not impose any restrictions on the variance of the parameters of its type argument. However, when either covariance or contravariance is assumed, the corresponding parameters of the type argument must have the same variance.

\begin{lstlisting}[caption=Example of unsoundness if higher-order variance annotations are not enforced.,label=lst:variance,float]
trait Seq[+A, C[+X]] { self: C[A] =>
  def lift[B >: A]: C[B] = this
}

class Cell[A] extends 
    Seq[A, Cell] { // the only (static) error
  private var cell: A = _
  def set(x: A) = cell = x
  def get: A = cell
}

class Top
class Ext extends Top {
  def bar() = println("bar")
}

val exts: Cell[Ext] = new Cell[Ext]
val tops: Cell[Top] = exts.lift[Top]
tops.set(new Top)
exts.get.bar()  // method not found error, if
         // the above static error is ignored
\end{lstlisting}

% \begin{comment}
%   Scala has definition-site variance, e.g., Iterable is covariant in first arg
% --> trait Iterable[+T] 
%   --> type Container[X] --> PROBLEM: Container[T] illegal
%    --> type Container[+X], otherwise we can't write Container[T] 
%  (T is covariant, cannot appear in invariant position)
% 
% example of problem if we ignore these rules
% \end{comment}


% \subsection{Variance in Scalina}
% \{sketch use of Co and Con kind constructors (cf. Edsko's uniqueness typing paper), unifying types and attributes (annotations) using kinds}
% 


%%%%%%%%%%%%%%%%%
\section{Leveraging Scala's implicits \label{sec:implicits}}  %\TODO{polish -- feels unfocused?}
In this section we discuss how the introduction of type constructor polymorphism has made Scala's support for implicit arguments more powerful. Implicits have been implemented in Scala since version 1.4. They are the minimal extension to an object-oriented language so that Haskell's type classes \cite{DBLP:conf/popl/WadlerB89} can be encoded \cite{odersky06:pmtc}. 

We first show how to improve the example from Section \ref{sec:dup} using implicits, so
that clients of \type{Iterable} no longer need to supply the correct instance of
\type{Buildable[C]}. Since there generally is only one instance of \type{Buildable[C]} for
a particular type constructor \type{C}, it becomes quite tedious to supply it as an
argument whenever calling one of \mbox{\type{Iterable}'s} methods that requires it.

Fortunately, Scala's implicits can be used to shift this burden to the compiler. It
suffices to add the \code{implicit} keyword to the parameter list that contains the
\code{b: Buildable[C]} parameter, and to the \type{XXXIsBuildable} objects. With this
change, which is sketched in Listing \ref{lst:implicits}, callers (such as in the example
of Listing \ref{lst:iterex}) typically do not need to supply this argument.



\begin{lstlisting}[float=p,label=lst:implicits,caption=Snippet: leveraging implicits in \type{Iterable}]
trait Iterable[T] {
  def map[U](f: T => U)
            (implicit b: Buildable[Container]): Container[U] 
    = mapTo[U, Container](f)
    // no need to pass b explicitly
    // similar for other methods
}

implicit object ListBuildable 
                extends Buildable[List]{...}
implicit object OptionBuildable 
                extends Buildable[Option]{...}

// client code (previous example, using succinct function syntax):
val ages: List[Int] 
= bdays.flatMapTo[Int, List]{_.map{toYrs(_)}}
\end{lstlisting}

In the rest of this section we explain this feature in order to illustrate the interaction
with type constructor polymorphism. With the introduction of type constructor
polymorphism, our encoding of type classes is extended to constructor classes, such as
\type{Monad}, as discussed in Section \ref{sec:bounds:tpclass}. Moreover, our encoding
exceeds the original because we integrate type constructor polymorphism with subtyping, so
that we can abstract over bounds. This would correspond to abstracting over type class
contexts, which is not supported in Haskell
\cite{hughes99:restricted,jones94:setmonad,kidd07:setmonad,chak07:classfamilies}. Section
\ref{sec:bounds:tpclass} discusses this in more detail.

\subsection{Introduction to implicits}
The principal idea behind implicit parameters is that arguments
for them can be left out from a method call. If the arguments corresponding to an
implicit parameter section are missing, they are inferred by the Scala compiler.

Listing \ref{lst:monoid} introduces implicits by way of a simple example. It defines an
abstract class of monoids and two concrete implementations, \code{StringMonoid} and
\code{IntMonoid}. The two implementations are marked with an \lstinline@implicit@
modifier.

Listing \ref{lst:monoid:sum} implements a \lstinline@sum@ method, which works
over arbitrary monoids. \lstinline@sum@'s second parameter is marked
\lstinline@implicit@. Note that \code{sum}'s recursive call does not need to pass along the \code{m} implicit argument.


\begin{lstlisting}[caption=Using implicits to model monoids,label=lst:monoid,float]
abstract class Monoid[T] {
  def add(x: T, y: T): T
  def unit: T
}

object Monoids {
  implicit object stringMonoid 
                  extends Monoid[String] {
    def add(x: String, y: String): String 
      = x.concat(y)
    def unit: String = ""
  }
  implicit object intMonoid 
                  extends Monoid[Int] {
    def add(x: Int, y: Int): Int
      = x + y
    def unit: Int = 0
  }
}
\end{lstlisting}

\begin{lstlisting}[caption=Summing lists over arbitrary monoids,label=lst:monoid:sum,float]
def sum[T](xs: List[T])(implicit m: Monoid[T]): T 
  = if(xs.isEmpty) m.unit else m.add(xs.head, sum(xs.tail))
\end{lstlisting}

The actual arguments that are eligible to be passed to an implicit
parameter include all identifiers that are marked \code{implicit}, and that can be accessed at the point
of the method call without a prefix. For instance, the scope of the
\lstinline@Monoids@ object can be opened up using an import statement, such as \mbox{\lstinline!import Monoids._!}. This makes the two implicit definitions of \lstinline@stringMonoid@ and \lstinline@intMonoid@
eligible to be passed as implicit arguments, so that one can write:

\begin{lstlisting}[frame=no]
sum(List("a", "bc", "def"))
sum(List(1, 2, 3))
\end{lstlisting}
These applications of \lstinline@sum@ are equivalent to the following two applications, 
where the formerly implicit argument is now given explicitly.
\begin{lstlisting}[frame=no]
sum(List("a", "bc", "def"))(stringMonoid)
sum(List(1, 2, 3))(intMonoid)
\end{lstlisting}

If there are several eligible arguments that match an implicit
parameter's type, a most specific one will be chosen using the
standard rules of Scala's static overloading resolution. If there is
no unique most specific eligible implicit definition, the 
call is ambiguous and will result in a static error.

% \subsection{Implicit parameters generalise subtype bounds} Adding a parameter list to a method clearly constrains how that method may be called. For example, since \code{mapTo} declares a parameter of type \type{Buildable[C]}, it can only be called when an argument of type \type{Buildable[C]} can be supplied. Making this parameter list implicit shifts this burden to the compiler, but the constraint remains.
% 
% Similarly, subtype bounds on a method's type parameters constrain how this method may be called, as the actual type arguments supplied by the client must meet the declared bounds. Conceptually, this may be thought of as an implicit parameter that represents the coercion that corresponds to the subtype bound. 
% 
% Thus, both mechanisms can be used to restrict method calls. In both cases, the compiler carries the burden of providing the witness to that constraint. However, implicit parameters are more general than subtype bounds in two ways.
% 
% First, the programmer has access to the value that witnesses the constraint. With subtyping, a bound on a type parameter \type{T} simply results in more information being available on values of type \type{T}. To use that information, a value of type \type{T} is needed. 
% 
% In the case of type constructor parameters, such as \type{C} in \code{mapTo}, it is less useful to gain more information about values of type \type{C[T]} for some type \type{T}.  More concretely, suppose we would use a subtype constraint \type{C[X] <: Buildable[C]} instead of an implicit parameter. Thus, for any \type{T}, a \type{C[T]} is also a \mbox{\type{Buildable[C]}}. In this case, this results in a Catch-22: we need an instance of \type{Buildable[C]} to create an object of type \type{C[T]}, but the only way to acquire a \type{Buildable[C]} is to coerce a value of type \type{C[T]}. Moreover, the type \type{T} needs to be specified even though it is irrelevant for the \type{Buildable} abstraction.
% 
% %To make the consequences of using the bound \type{C[X] <: Buildable[C]} more concrete: this would require that \type{List[T]} extend \type{Buildable[List]}. However, this does not make sense, as \type{Buildable[List]}'s \code{build} method must then be called on a \type{List[T]}, even though its purpose is precisely to create such instances.
% 
% Using an implicit parameter instead of a bound solves both problems, without increasing the burden on clients of the abstraction. The compiler automatically supplies the right instance of \type{Buildable[C]}, without the need to specify a type \type{T} or acquire a value of type \type{C[T]}.
% 
% Second, implicit parameters can constrain other abstract types besides the method's own type parameters. As an example of such a generalised constraint \cite{DBLP:conf/ecoop/EmirKRY06}, the \code{map}/\code{filter}/\code{flatMap} methods all require an implicit parameter of type \type{Buildable[Container]}. This effectively constrains the \type{Container} type constructor member, which is not possible using ordinary subtype bounds.
% 
% 
% %\{novel approach to the builder pattern, makes it possible to implement a generalised \code{sequence}}


\subsection{Encoding Haskell's type classes with implicits \label{sec:tpclass}}
Haskell's type classes have grown from a simple mechanism that deals with overloading \cite{DBLP:conf/popl/WadlerB89}, to an important tool in dealing with the challenges of modern software engineering. Its success has prompted others to explore similar features in Java \cite{DBLP:conf/ecoop/WehrLT07}.


\lstset{language=Haskell}

\begin{lstlisting}[float,caption=Using type classes to overload \code{<=} in Haskell,label=lst:ord:haskell,language=haskell]
class  Ord a  where
  (<=) :: a -> a -> Bool
  
instance Ord Date where
  (<=)     = ...
  
max     :: Ord a => a -> a -> a
max x y = if x <= y then y else x
\end{lstlisting}

\subsubsection{An example in Haskell} Listing \ref{lst:ord:haskell} defines a simplified version of the well-known \type{Ord} type class. This definition says that if a type \type{a} is in the \type{Ord} \emph{type class}, the function \code{<=} with type \lstinline!a -> a -> Bool! is available. The \emph{instance declaration} \lstinline!instance Ord Date! gives a concrete implementation of the \code{<=} operation on \type{Date}s and thus adds \type{Date} as an \emph{instance} to the \type{Ord} type class. To constrain an abstract type to instances of a type class, \emph{contexts} are employed. For example, \code{max}'s signature constrains \type{a} to be an instance of \type{Ord} using the context \code{Ord a}, which is separated from the function's type by a \code{=>}.

Conceptually, a context that constrains a type \type{a}, is translated into an extra parameter that supplies the implementations of the type class's methods, packaged in a so-called ``method dictionary''. An instance declaration   specifies the contents of the method dictionary for this particular type. 


\lstset{language=Scala}


\begin{lstlisting}[float,caption=Encoding type classes using Scala's implicits,label=lst:ord:scala]
trait Ord[T] {
  def <= (other: T): Boolean
}

import java.util.Date

implicit def dateAsOrd(self: Date)
  = new Ord[Date] {
    def <= (other: Date) = self.equals(other) ||
                           self.before(other)
  }

def max[T <% Ord[T]](x: T, y: T): T 
  = if(x <= y) y else x
\end{lstlisting}

\subsubsection{Encoding the example in Scala} It is natural to turn a type class into a class, as shown in Listing \ref{lst:ord:scala}. Thus, an instance of that class corresponds to a method dictionary, as it supplies the actual implementations of the methods declared in the class. The instance declaration \lstinline[language=Haskell]!instance Ord Date!  is translated into an implicit method that converts a \type{Date} into an \type{Ord[Date]}. An object of type \type{Ord[Date]} encodes the method dictionary of the \type{Ord} type class for the instance \type{Date}. 

Because of Scala's object-oriented nature, the creation of method dictionaries is driven by member selection. Whereas the Haskell compiler selects the right method dictionary fully automatically, in Scala this process is triggered by calling missing methods on objects of a type that is an instance (in the Haskell sense) of a type class that does provide this method. When a type class method, such as \code{<=}, is selected on a type \type{T} that does not define that method, the compiler searches an implicit value that converts a value of type \type{T} into a value that does support this method. In this case, the implicit method \code{dateAsOrd} is selected when \type{T} equals \type{Date}.

Note that Scala's scoping rules for implicits differ from Haskell's. Briefly, the search for an implicit is performed locally in the scope of the method call that triggered it, whereas this is a global process in Haskell.

Contexts are another trigger for selecting method dictionaries. The \code{Ord a} context of the \code{max} method is encoded as a view bound \lstinline!T <% Ord[T]!,
 which is syntactic sugar for an implicit parameter that converts the bounded type to its view bound. Thus, when the \code{max} method is called, the compiler must find the appropriate implicit conversion. Listing \ref{lst:max1} removes this syntactic sugar, and Listing \ref{lst:max2} goes even further and makes the implicits explicit. Clients would then have to supply the implicit conversion explicitly: \code{max(dateA, dateB)(dateAsOrd)}.


\begin{lstlisting}[float,label=lst:max1,caption=Desugaring view bounds]
def max[T](x: T, y: T)
          (implicit conv: T => Ord[T]): T 
  = if(x <= y) y else x
\end{lstlisting}

\begin{lstlisting}[float,label=lst:max2,caption=Making implicits explicit]
def max[T](x: T, y: T)(c: T => Ord[T]): T 
  = if(c(x).<=(y)) y else x
\end{lstlisting}

\subsubsection{Conditional implicits}
By defining implicit methods that themselves take implicit parameters, Haskell's conditional instance declarations can be encoded:

\begin{minipage}{\columnwidth}
\begin{lstlisting}[language=haskell,frame=no]
instance Ord a => Ord (List a) where
  (<=)     = ...
\end{lstlisting}
\end{minipage}

This is encoded in Scala as:
\begin{lstlisting}[frame=no]
implicit def listAsOrd[T](self: List[T])(implicit v: T => Ord[T]) = 
  new Ord[List[T]] {
    def <= (other: List[T]) = // compare elements in self and other
  }
\end{lstlisting}
Thus, two lists with elements of type \type{T} can be compared as long as their elements are comparable. %To ensure that the compiler's search for implicit arguments terminates, the Scala Language Specification defines a contractiveness check for implicit methods \cite{odersky:scala-reference}.
% TODO: this check is gone! maybe we can define bounded monad using view bounds instead of real subtyping bounds?

Type classes and implicits both provide ad-hoc polymorphism. Like parametric
polymorphism, this allows methods or classes to be applicable to
arbitrary types. However, parametric polymorphism implies that a
method or a class is truly indifferent to the actual argument of its
type parameter, whereas ad-hoc polymorphism maintains this illusion by
selecting different methods or classes for different actual type
arguments.

This ad-hoc nature of type classes and implicits can be seen as a
retroactive extension mechanism. In OOP, virtual classes
\cite{DBLP:conf/oopsla/OssherH92,ernst99b} have been proposed as an alternative that is better
suited for retroactive extension. However, ad-hoc polymorphism also
allows types to drive the selection of functionality as demonstrated
by the selection of (implicit) instances of \type{Buildable[C]} in our
\type{Iterable} example\footnote{Java's static overloading mechanism
  is another example of ad-hoc polymorphism.}. \type{Buildable}
clearly could not be truly polymorphic in its parameter, as that would
imply that there could be one \type{Buildable} that knew how to supply
a strategy for building any type of container.


\subsection{Exceeding type classes \label{sec:bounds:tpclass}}
\begin{lstlisting}[float,caption=\type{Set} cannot be made into a \type{Monad} in Haskell,label=lst:setmonad:haskell,language=haskell]
class Monad m where
  (>>=) :: m a -> (a -> m b) -> m b
  
data (Ord a) => Set a = ...

instance Monad Set where
 -- (>>=) :: Set a -> (a -> Set b) -> Set b
\end{lstlisting}

As shown in Listing \ref{lst:setmonad:haskell}, Haskell's \type{Monad} abstraction \cite{DBLP:conf/afp/Wadler95} does not apply to type constructors with a constrained type parameter, such as \mbox{\type{Set},} as explained below. Resolving this issue in Haskell is an active research topic \cite{chak07:classfamilies,DBLP:conf/popl/ChakravartyKJM05,hughes99:restricted}.

In this example, the \type{Monad} abstraction\footnote{In fact, the main difference between our \type{Iterable} and Haskell's \type{Monad} is spelling.} does not accommodate constraints on the type parameter of the \type{m} type constructor that it abstracts over. Since \type{Set} is a type constructor that constrains its type parameter, it is not a valid argument for \type{Monad}'s \type{m} type parameter: \type{m a} is allowed for any type \type{a}, whereas \type{Set a} is only allowed if \type{a} is an instance of the \type{Ord} type class. Thus, passing \type{Set} as \type{m} could lead to violating this constraint.

\begin{lstlisting}[float,caption=\type{Monad} in Scala,label=lst:monad:scala]
trait Monad[A, M[X]] {
  def >>= [B](f: A => M[B]): M[B] 
}
\end{lstlisting}

\begin{lstlisting}[float,caption=\type{Set} as a \type{BoundedMonad} in Scala,label=lst:setmonad:scala]
trait BoundedMonad[A <: Bound[A], M[X <: Bound[X]], Bound[X]] {
  def >>= [B <: Bound[B]](f: A => M[B]): M[B] 
}

trait Set[T <: Ord[T]]

implicit def SetIsBoundedMonad[T <: Ord[T]](
  s: Set[T]): BoundedMonad[T, Set, Ord] = ...
\end{lstlisting}

For reference, Listing~\ref{lst:monad:scala} shows a direct encoding of the \type{Monad} type class. To solve the problem in Scala, we generalise \type{Monad} to \type{BoundedMonad} in Listing~\ref{lst:setmonad:scala} to deal with bounded type constructors. Finally, the encoding from Section~\ref{sec:tpclass} is used to turn a \type{Set} into a \type{BoundedMonad}.\\[2em] % TODO: EVIL HACK

% Note that a more faithful encoding would use view bounds instead of subtype bounds, but this is not yet accepted by the Scala compiler. Need to revise the definition of contractiveness to  account for higher-kinded types % TODO: this revision has been done...








%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\vspace{-2em} % TODO: EVIL HACK
\section{Related Work \label{sec:related}}
\subsection{Roots of our kinds}
Since the seminal work of Girard and Reynolds in the early 1970s, fragments of the higher-order polymorphic lambda calculus or System F$_\omega$~\cite{girard:thesis,DBLP:conf/programm/Reynolds74,DBLP:journals/iandc/BruceMM90} have served as the basis for many programming languages. The most notable example is Haskell~\cite{DBLP:journals/sigplan/HudakPWBFFGHHJKNPP92}, which has supported higher-kinded types for over 15 years~\cite{DBLP:conf/hopl/HudakHJW07}.

Although Haskell has higher-kinded types, it eschews subtyping. Most of the use-cases for subtyping are subsumed by type classes, which handle overloading systematically~\cite{DBLP:conf/popl/WadlerB89}. However, it is not (yet) possible to abstract over class contexts~\cite{hughes99:restricted,jones94:setmonad,kidd07:setmonad,chak07:classfamilies}. In our setting, this corresponds to abstracting over a type that is used as a bound, as discussed in Section~\ref{sec:bounds:tpclass}.

The interaction between higher-kinded types and subtyping is a well-studied subject~\cite{cardelli85:understanding,DBLP:conf/edbt/Cardelli88,DBLP:conf/fpca/CanningCHOM89,DBLP:journals/tcs/PierceS97,DBLP:journals/iandc/CompagnoniG03}. As far as we know, none of these approaches combine bounded type constructors, subkinding, subtyping \emph{and} variance, although all of these features are included in at least one of them. A similarity of interest is Cardelli's notion of power types~\cite{DBLP:conf/popl/Cardelli88}, which corresponds to our bounds-tracking kind \mbox{\lstinline|*(L, U)|.}


% TODO: There has been a lot of work on higher-order
% polymorphism, and this paper does not sufficiently situate their work
% in that context and clarify what is novel, which to my mind is the
% integration with OO features.


In summary, the presented type system can be thought of as the integration of an object-oriented system with Polarized F$^{\omega}_{sub}$ \cite{SteffenThesis}, Cardelli's power type, and subkinding. Subkinding is based on interval inclusion and the transposition of subtyping of dependent function types \cite{DBLP:journals/tcs/AspinallC01} to the level of kinds.

%Another interaction that occurs in Scala is that between subtyping and dependent types \cite{DBLP:journals/tcs/AspinallC01}
% subtyping and matching \cite{DBLP:journals/toplas/AbadiC96}

%\OmegaLang~\cite{DBLP:journals/entcs/Sheard07} is a Haskell-based language that (most notably) supports user-defined kinds and type-level computation. To a certain extent, it seems possible to encode the first mechanism using sealed hierarchies of abstract classes in Scala. Scala's singleton types are a good match for the Singleton pattern, which --- according to Sheard ---  is an important concept in \OmegaLang.  It may be possible to encode a limited form of type-level computation using Scala's implicits. However, dedicated support is clearly needed for this feature to be powerful enough. Part of our ongoing work is geared towards bringing the essence of \OmegaLang's power to Scala. The main goal of this effort is to realise an extensible type system that can be used for program verification.

%The main ideas can be implemented in Java or \CSharp, see Altherr and Cremet \cite{Altherr07:fgjomega} for a proposal of a Java extension.

% \subsection{Kind soundness}
% The \nuObj~calculus \cite{DBLP:conf/ecoop/OderskyCRZ03} does not possess the kind soundness property that was discussed in Section \ref{sec:embedding:kindsoundness}. Type parameters are encoded as abstract type members, which behave \emph{covariantly}. There is no way to enforce contravariance for these type members. Thus, type-level functions cannot be encoded faithfully, as their arguments must adhere to a contravariant regime.
% 
% Related work seems to deviate from \nuObj's design, although making a precise comparison is complicated by the differences in features supported by the various approaches. In the notation of Cardelli \cite{DBLP:conf/edbt/Cardelli88}, the types from the example in
% Listing \ref{lst:numlistmem} are classified as follows:
% 
% \begin{lstlisting}
% NumericList : ALL[A::POWER[Number]] TYPE
% Container : ALL[X::TYPE] TYPE
% \end{lstlisting}
% 
% Cardelli does not define subkinding for these kinds, but does define subtyping for polymorphic functions (``All [X::K] B \textless{}: All [X::K$'$] B$'$ if K$'$\textless{}::K (where \textless{}:: denotes a subkind relation[, \ldots{}]), and B\textless{}:B$'$ under the assumption that X::K$'$''). It seems reasonable to lift this rule (which deals with functions that take a type to yield a \emph{value}) to the level of kinds, which results in our rule that deals with functions that take a type to yield a \emph{type}.
% 
% Similarly, in the notation of Compagnoni and Goguen \cite{DBLP:journals/iandc/CompagnoniG03}:
% 
% \begin{lstlisting}
% NumericList : Pi A <: Number : *. *
% Container : Pi X <: T* : *. *
% \end{lstlisting}
% 
% Although the authors require these bounds to be equal for the kinds to be comparable (their treatment does not include subkinding), we generalise based on the same observation as the previous paragraph, but using a slight different source of inspiration. Namely, Full System F$_{<}$:'s \cite{DBLP:journals/iandc/CardelliMMS94} rule that deals with bounded quantification at the value level (Sub Forall) also requires \emph{contravariance} for the bounds of the quantifier.
% 
% We recover early error detection in Scalina \cite{moors08:scalina}, a purely object-oriented calculus, by differentiating covariant and contravariant members, instead of assuming they all behave covariantly. This distinction corresponds to the fact that some members abstract over input, whereas others represent the output of the abstraction. Input members should behave contravariantly, like the types of a method's parameters, whereas covariance is required for output members, which correspond to a method's result type. With this distinction, a purely object-oriented calculus can encode functional-style abstraction with the same safety guarantees.


\subsection{Type constructor polymorphism in OOPLs} 
Languages with virtual types or virtual classes, such as gbeta \cite{ernst99b}, can encode
type constructor polymorphism through abstract type members. The idea is to model a type
constructor such as \lstinline@List@ as a simple abstract type that has a type member
describing the element type. Since Scala has virtual types, \lstinline@List@ could also be
defined as a class with an abstract type member instead of as a type-parameterised class:

\begin{lstlisting}[frame=no]
abstract class List { type Elem }
\end{lstlisting}

Then, a concrete instantiation of \lstinline@List@ could be modelled as a type refinement, as in \lstinline@List{type Elem = String}@. The crucial point is that in this encoding \lstinline@List@ is a type, not a type constructor. So first-order polymorphism suffices to pass the \lstinline@List@ constructor as a type argument or an abstract type member refinement.

Compared to type constructor polymorphism, this encoding has a serious disadvantage, as it
permits the definition of certain accidentally empty type abstractions that cannot be
instantiated to concrete values later on. By contrast, type constructor polymorphism has a
{\em kind soundness} property that guarantees that well-kinded type applications never
result in nonsensical types.

Type constructor polymorphism has recently started to trickle down to object-oriented languages. Cremet and Altherr's work on extending Featherweight Generic Java with higher-kinded types \cite{cremet-altherr:jot08} partly inspired the design of our syntax. However, since they extend Java, they do not model type members and path-dependent types, definition-site variance, or intersection types. They do provide direct support for anonymous type constructors. Furthermore, although their work demonstrates that type constructor polymorphism can be integrated into Java, they only provide a prototype of a compiler and an interpreter. However, they have developed a mechanised soundness proof and a pencil-and-paper proof of decidability. 

Finally, we briefly mention OCaml and C++. C++'s template mechanism is related, but, while templates are very flexible, this comes at a steep price: they can only be type-checked after they have been expanded. Recent work on ``concepts'' alleviates this \cite{DBLP:conf/oopsla/GregorJSSRL06}. 

In OCaml (as in ML), type constructors are first-order. Thus, although a type of, e.g., kind \kind{* -> * -> *} is supported, types of kind \kind{(* -> *) -> * -> *} cannot be expressed directly. However, ML dialects that support applicative functors, such as OCaml and Moscow ML, can encode type constructor polymorphism in much the same way as languages with virtual types.




% distill to one sentence and include in conclusion (i.e., it's not about reducing LoC, but about better interfaces) ?
% \subsection{Quantitative evaluation}
% As an indication of how much boilerplate can be scrapped using type constructor polymorphism, we performed a small experiment. We selected a subset of the actual Scala collection API (\type{Iterator}, \type{Iterable}, \type{List}, and \type{Option}) and re-implemented it, making full use of our extension. This rewrite resulted in a $10\%$ savings of lines of code (from 154 lines to 139 lines)\footnote{The full source code for the experiment is available on the paper's homepage.}. This ratio is in line with others' results \cite{DBLP:conf/oopsla/BlackSD03}.
% 
% Of course, our sample is quite small. However, for a bigger sample, the absolute reduction does not decrease, given more subclasses of \type{Iterable}, and more methods in \type{Iterable} that need to be re-implemented. Suppose the full \type{Iterable} interface contains $M$ methods (average LoC in such a method: $N$) that build a collection and that must be re-implemented in \type{Iterable}'s subclasses (say there are $C$ of those). In \type{List} a \type{List} must be produced, in \type{Set}, the result must again be a \type{Set}, and so on.
% 
% Thus, if one scraps this boilerplate using the \type{Buildable} pattern (whose implementation takes $N'$ lines per class, on average), $C*M$ methods can be omitted, or $C*(M*N - N')$ lines of code. The results from this smaller example indicate that it is safe to assume that $M*N > N'$.
% 
% Finally, this comparison does not consider the impact on performance (a re-implementation in subclasses may sometimes yield a performance increase, although this is certainly not always the case), or more qualitative aspects, such as precision of the interface, reusability,\ldots 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conclusion \label{sec:conclusion}}

Genericity is a proven technique to reduce code duplication in object-oriented libraries, as well as making them easier to use by clients. The prime example is a collections library, where clients no longer need to cast the elements they retrieve from a generic collection.

Unfortunately, though genericity is extremely useful, the first-order variant is self-defeating in the sense that abstracting over proper types gives rise to type constructors, which cannot be abstracted over. Thus, by using genericity to reduce code duplication, other kinds of boilerplate arise. Type constructor polymorphism makes it possible to eliminate these redundancies as well, as it generalises genericity to type constructors. 

As with genericity, most use cases for type constructor polymorphism arise in library design and implementation, where it provides more control over the interfaces that are exposed to clients, while reducing code duplication.  Moreover, clients are not exposed to the complexity that is inherent to these advanced abstraction mechanisms. In fact, clients \emph{benefit} from the more precise interfaces that can be expressed with type constructor polymorphism, just like genericity reduced the number of casts that clients of a collections library had to write.

We implemented type constructor polymorphism in Scala 2.5. The essence of our solution carries over easily to Java, see Altherr and Cremet for a proposal~\cite{Altherr07:fgjomega}. 

Finally, we have only reported on one of several applications that we have experimented with. Embedded domain specific languages (DSL's) \cite{DBLP:conf/aplas/CaretteKS07} are another promising application area of type constructor polymorphism. We are currently applying these ideas to our parser combinator library, a DSL for writing EBNF grammars in Scala \cite{moors07:sparsec}. Hofer, Ostermann et al. are investigating similar applications \cite{DBLP:conf/gpce/HoferORM07}, which critically rely on type constructor polymorphism. 



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\acks
The authors would like to thank Dave Clarke, Marko van Dooren, Burak Emir, Erik Ernst, Bart Jacobs, Andreas Rossberg, Jan Smans, and Lex Spoon for their insightful comments and interesting discussions. We also gratefully acknowledge the Scala community for providing a fertile testbed for this research. Finally, we thank the reviewers for their detailed comments that helped us improve the paper. An older version of this paper was presented at the MPOOL workshop \cite{moors07:tcpoly}.

The first author is supported by a grant from the Flemish IWT. Part of the reported work was performed during a 3-month stay at EPFL.

%\bibliography{manual,dblp}

\begin{thebibliography}{10}

\bibitem{DBLP:journals/scp/AbadiC95}
M.~Abadi and L.~Cardelli.
\newblock A theory of primitive objects: Second-order systems.
\newblock {\em Sci. Comput. Program.}, 25(2-3):81--116, 1995.

\bibitem{DBLP:journals/iandc/AbadiC96}
M.~Abadi and L.~Cardelli.
\newblock A theory of primitive objects: Untyped and first-order systems.
\newblock {\em Inf. Comput.}, 125(2):78--102, 1996.

\bibitem{Altherr07:fgjomega}
P.~Altherr and V.~Cremet.
\newblock Adding type constructor parameterization to {J}ava.
\newblock Accepted to the workshop on Formal Techniques for {J}ava-like
  Programs (FTfJP'07) at the European Conference on Object-Oriented Programming
  (ECOOP), 2007.

\bibitem{DBLP:journals/tcs/AspinallC01}
D.~Aspinall and A.~B. Compagnoni.
\newblock Subtyping dependent types.
\newblock {\em Theor. Comput. Sci.}, 266(1-2):273--309, 2001.

\bibitem{DBLP:conf/ecoop/BiermanMS05}
G.~M. Bierman, E.~Meijer, and W.~Schulte.
\newblock The essence of data access in {Comega}.
\newblock In A.~P. Black, editor, {\em ECOOP}, volume 3586 of {\em Lecture
  Notes in Computer Science}, pages 287--311. Springer, 2005.

\bibitem{1314923}
G.~Bracha.
\newblock Executable grammars in {N}ewspeak.
\newblock {\em Electron. Notes Theor. Comput. Sci.}, 193:3--18, 2007.

\bibitem{DBLP:journals/iandc/BruceMM90}
K.~B. Bruce, A.~R. Meyer, and J.~C. Mitchell.
\newblock The semantics of second-order lambda calculus.
\newblock {\em Inf. Comput.}, 85(1):76--134, 1990.

\bibitem{DBLP:conf/ecoop/BruceOW98}
K.~B. Bruce, M.~Odersky, and P.~Wadler.
\newblock A statically safe alternative to virtual types.
\newblock In E.~Jul, editor, {\em ECOOP}, volume 1445 of {\em Lecture Notes in
  Computer Science}, pages 523--549. Springer, 1998.

\bibitem{DBLP:conf/ecoop/BruceSG95}
K.~B. Bruce, A.~Schuett, and R.~van Gent.
\newblock {PolyTOIL}: A type-safe polymorphic object-oriented language.
\newblock In W.~G. Olthoff, editor, {\em ECOOP}, volume 952 of {\em Lecture
  Notes in Computer Science}, pages 27--51. Springer, 1995.

\bibitem{DBLP:conf/fpca/CanningCHOM89}
P.~S. Canning, W.~R. Cook, W.~L. Hill, W.~G. Olthoff, and J.~C. Mitchell.
\newblock F-bounded polymorphism for object-oriented programming.
\newblock In {\em FPCA}, pages 273--280, 1989.

\bibitem{DBLP:conf/popl/Cardelli88}
L.~Cardelli.
\newblock Structural subtyping and the notion of power type.
\newblock In {\em POPL}, pages 70--79, 1988.

\bibitem{DBLP:conf/edbt/Cardelli88}
L.~Cardelli.
\newblock Types for data-oriented languages.
\newblock In J.~W. Schmidt, S.~Ceri, and M.~Missikoff, editors, {\em EDBT},
  volume 303 of {\em Lecture Notes in Computer Science}, pages 1--15. Springer,
  1988.

\bibitem{cardelli85:understanding}
L.~Cardelli and P.~Wegner.
\newblock On understanding types, data abstraction, and polymorphism.
\newblock {\em ACM Computing Surveys}, 17(4):471--522, 1985.

\bibitem{DBLP:conf/aplas/CaretteKS07}
J.~Carette, O.~Kiselyov, and C.-c. Shan.
\newblock Finally tagless, partially evaluated.
\newblock In Z.~Shao, editor, {\em APLAS}, volume 4807 of {\em Lecture Notes in
  Computer Science}, pages 222--238. Springer, 2007.

\bibitem{chak07:classfamilies}
M.~Chakravarty, S.~L.~P. Jones, M.~Sulzmann, and T.~Schrijvers.
\newblock Class families, 2007.
\newblock On the GHC Developer wiki,
  \url{http://hackage.haskell.org/trac/ghc/wiki/TypeFunctions/ClassFamilies}.

\bibitem{DBLP:conf/popl/ChakravartyKJM05}
M.~M.~T. Chakravarty, G.~Keller, S.~L.~P. Jones, and S.~Marlow.
\newblock Associated types with class.
\newblock In J.~Palsberg and M.~Abadi, editors, {\em POPL}, pages 1--13. ACM,
  2005.

\bibitem{DBLP:journals/iandc/CompagnoniG03}
A.~B. Compagnoni and H.~Goguen.
\newblock Typed operational semantics for higher-order subtyping.
\newblock {\em Inf. Comput.}, 184(2):242--297, 2003.

\bibitem{cremet-altherr:jot08}
V.~Cremet and P.~Altherr.
\newblock Adding type constructor parameterization to {J}ava.
\newblock {\em Journal of Object Technology}, 7(5):25--65, June 2008.
\newblock Special Issue: Workshop on FTfJP, ECOOP 07.
  http://www.jot.fm/issues/issue\_2008\_06/article2/.

\bibitem{DBLP:conf/ecoop/EmirKRY06}
B.~Emir, A.~Kennedy, C.~V. Russo, and D.~Yu.
\newblock Variance and generalized constraints for {C}$^{\mbox{\#}}$ generics.
\newblock In D.~Thomas, editor, {\em ECOOP}, volume 4067 of {\em Lecture Notes
  in Computer Science}, pages 279--303. Springer, 2006.

\bibitem{ernst99b}
E.~Ernst.
\newblock {\em gbeta -- a Language with Virtual Attributes, Block Structure,
  and Propagating, Dynamic Inheritance}.
\newblock PhD thesis, Department of Computer Science, University of Aarhus,
  \AA{}rhus, Denmark, 1999.

\bibitem{DBLP:conf/ecoop/Ernst01}
E.~Ernst.
\newblock Family polymorphism.
\newblock In J.~L. Knudsen, editor, {\em ECOOP}, volume 2072 of {\em Lecture
  Notes in Computer Science}, pages 303--326. Springer, 2001.

\bibitem{DBLP:conf/icfp/GibbonsJ98}
J.~Gibbons and G.~Jones.
\newblock The under-appreciated unfold.
\newblock In {\em ICFP}, pages 273--279, 1998.

\bibitem{girard:thesis}
J.~Girard.
\newblock Interpretation fonctionelle et elimination des coupures de
  l'arithmetique d'ordre superieur.
\newblock Th{\`e}se d'{\'E}tat, Paris VII, 1972.

\bibitem{gosling05:jls}
J.~Gosling, B.~Joy, G.~Steele, and G.~Bracha.
\newblock {\em Java(TM) Language Specification, The (3rd Edition) (Java
  (Addison-Wesley))}.
\newblock Addison-Wesley Professional, 2005.

\bibitem{DBLP:conf/oopsla/GregorJSSRL06}
D.~Gregor, J.~J{\"a}rvi, J.~G. Siek, B.~Stroustrup, G.~D. Reis, and
  A.~Lumsdaine.
\newblock Concepts: linguistic support for generic programming in {C++}.
\newblock In P.~L. Tarr and W.~R. Cook, editors, {\em OOPSLA}, pages 291--310.
  ACM, 2006.

\bibitem{DBLP:conf/gpce/HoferORM07}
C.~Hofer, K.~Ostermann, T.~Rendel, and A.~Moors.
\newblock Polymorphic embedding of {DSL}s.
\newblock In Y.~Smaragdakis and J.~Siek, editors, {\em GPCE}. ACM, 2008.
\newblock To appear.

\bibitem{DBLP:conf/hopl/HudakHJW07}
P.~Hudak, J.~Hughes, S.~L.~P. Jones, and P.~Wadler.
\newblock A history of {H}askell: being lazy with class.
\newblock In B.~G. Ryder and B.~Hailpern, editors, {\em HOPL}, pages 1--55.
  ACM, 2007.

\bibitem{DBLP:journals/sigplan/HudakPWBFFGHHJKNPP92}
P.~Hudak, S.~L.~P. Jones, P.~Wadler, B.~Boutel, J.~Fairbairn, J.~H. Fasel,
  M.~M. Guzm{\'a}n, K.~Hammond, J.~Hughes, T.~Johnsson, R.~B. Kieburtz, R.~S.
  Nikhil, W.~Partain, and J.~Peterson.
\newblock Report on the programming language {H}askell, a non-strict, purely
  functional language.
\newblock {\em SIGPLAN Notices}, 27(5):R1--R164, 1992.

\bibitem{hughes99:restricted}
J.~Hughes.
\newblock Restricted datatypes in {H}askell.
\newblock Technical Report UU-CS-1999-28, Department of Information and
  Computing Sciences, Utrecht University, 1999.

\bibitem{Hutton96:monpars}
G.~Hutton and E.~Meijer.
\newblock {Monadic Parser Combinators}.
\newblock Technical Report NOTTCS-TR-96-4, Department of Computer Science,
  University of Nottingham, 1996.

\bibitem{DBLP:journals/toplas/IgarashiPW01}
A.~Igarashi, B.~C. Pierce, and P.~Wadler.
\newblock Featherweight {J}ava: a minimal core calculus for {J}ava and {GJ}.
\newblock {\em ACM Trans. Program. Lang. Syst.}, 23(3):396--450, 2001.

\bibitem{jones94:setmonad}
M.~P. Jones.
\newblock Constructor classes \& ``set'' monad?, 1994.
\newblock
  \url{http://groups.google.com/group/comp.lang.functional/msg/e10290b2511c65f%
0}.

\bibitem{DBLP:journals/jfp/Jones95}
M.~P. Jones.
\newblock A system of constructor classes: Overloading and implicit
  higher-order polymorphism.
\newblock {\em J. Funct. Program.}, 5(1):1--35, 1995.

\bibitem{kidd07:setmonad}
E.~Kidd.
\newblock How to make {D}ata.{S}et a monad, 2007.
\newblock
  \url{http://www.randomhacks.net/articles/2007/03/15/data-set-monad-haskell-m%
acros}.

\bibitem{LeijenMeijer:parsec}
D.~Leijen and E.~Meijer.
\newblock Parsec: Direct style monadic parser combinators for the real world.
\newblock Technical Report UU-CS-2001-27, Department of Computer Science,
  Universiteit Utrecht, 2001.

\bibitem{DBLP:conf/oopsla/Meijer06}
E.~Meijer.
\newblock There is no impedance mismatch: (language integrated query in
  {V}isual {B}asic 9).
\newblock In P.~L. Tarr and W.~R. Cook, editors, {\em OOPSLA Companion}, pages
  710--711. ACM, 2006.

\bibitem{DBLP:conf/oopsla/Meijer07}
E.~Meijer.
\newblock Confessions of a used programming language salesman.
\newblock In R.~P. Gabriel, D.~F. Bacon, C.~V. Lopes, and G.~L.~S. Jr.,
  editors, {\em OOPSLA}, pages 677--694. ACM, 2007.

\bibitem{moors07:tcpoly}
A.~Moors, F.~Piessens, and M.~Odersky.
\newblock Towards equal rights for higher-kinded types.
\newblock Accepted for the 6th International Workshop on Multiparadigm
  Programming with Object-Oriented Languages at the European Conference on
  Object-Oriented Programming (ECOOP), 2007.

\bibitem{moors07:sparsec}
A.~Moors, F.~Piessens, and M.~Odersky.
\newblock Parser combinators in {S}cala.
\newblock Technical Report CW491, Department of Computer Science, K.U. Leuven,
  2008.
\newblock
  \url{http://www.cs.kuleuven.be/publicaties/rapporten/cw/CW491.abs.html}.

\bibitem{moors08:scalina}
A.~Moors, F.~Piessens, and M.~Odersky.
\newblock Safe type-level abstraction in {S}cala.
\newblock In {\em Proc. FOOL '08}, Jan. 2008.
\newblock \url{http://fool08.kuis.kyoto-u.ac.jp/}.

\bibitem{odersky06:pmtc}
M.~Odersky.
\newblock Poor man's type classes, July 2006.
\newblock Talk at IFIP WG 2.8, Boston.

\bibitem{odersky:scala-reference}
M.~Odersky.
\newblock {\em The {S}cala {L}anguage {S}pecification, Version 2.6}.
\newblock EPFL, Nov. 2007.
\newblock \url{http://www.scala-lang.org/docu/files/ScalaReference.pdf}.

\bibitem{LAMP-REPORT-2006-001}
M.~Odersky, P.~Altherr, V.~Cremet, I.~Dragos, G.~Dubochet, B.~Emir,
  S.~McDirmid, S.~Micheloud, N.~Mihaylov, M.~Schinz, L.~Spoon, E.~Stenman, and
  M.~Zenger.
\newblock An {O}verview of the {S}cala {P}rogramming {L}anguage (2. edition).
\newblock Technical report, 2006.

\bibitem{DBLP:conf/ecoop/OderskyCRZ03}
M.~Odersky, V.~Cremet, C.~R{\"o}ckl, and M.~Zenger.
\newblock A nominal theory of objects with dependent types.
\newblock In L.~Cardelli, editor, {\em ECOOP}, volume 2743 of {\em Lecture
  Notes in Computer Science}, pages 201--224. Springer, 2003.

\bibitem{odersky08:scalabook}
M.~Odersky, L.~Spoon, and B.~Venners.
\newblock {\em Programming in {S}cala}.
\newblock Artima, 2008.

\bibitem{DBLP:conf/popl/OderskyZZ01}
M.~Odersky, C.~Zenger, and M.~Zenger.
\newblock Colored local type inference.
\newblock In {\em POPL}, pages 41--53, 2001.

\bibitem{DBLP:conf/oopsla/OderskyZ05}
M.~Odersky and M.~Zenger.
\newblock Scalable component abstractions.
\newblock In R.~Johnson and R.~P. Gabriel, editors, {\em OOPSLA}, pages 41--57.
  ACM, 2005.

\bibitem{DBLP:conf/oopsla/OssherH92}
H.~Ossher and W.~H. Harrison.
\newblock Combination of inheritance hierarchies.
\newblock In {\em OOPSLA}, pages 25--40, 1992.

\bibitem{DBLP:journals/tcs/PierceS97}
B.~C. Pierce and M.~Steffen.
\newblock Higher-order subtyping.
\newblock {\em Theor. Comput. Sci.}, 176(1-2):235--282, 1997.

\bibitem{DBLP:conf/programm/Reynolds74}
J.~C. Reynolds.
\newblock Towards a theory of type structure.
\newblock In B.~Robinet, editor, {\em Symposium on Programming}, volume~19 of
  {\em Lecture Notes in Computer Science}, pages 408--423. Springer, 1974.

\bibitem{DBLP:journals/entcs/Sheard07}
T.~Sheard.
\newblock Type-level computation using narrowing in {$\Omega$}mega.
\newblock {\em Electr. Notes Theor. Comput. Sci.}, 174(7):105--128, 2007.

\bibitem{SteffenThesis}
M.~Steffen.
\newblock {\em Polarized Higher-Order Subtyping}.
\newblock PhD thesis, Universit{\"a}t Erlangen-N{\"u}rnberg, 1998.

\bibitem{DBLP:conf/ecoop/ThorupT99}
K.~K. Thorup and M.~Torgersen.
\newblock Unifying genericity - combining the benefits of virtual types and
  parameterized classes.
\newblock In R.~Guerraoui, editor, {\em ECOOP}, volume 1628 of {\em Lecture
  Notes in Computer Science}, pages 186--204. Springer, 1999.

\bibitem{DBLP:journals/mscs/Wadler92}
P.~Wadler.
\newblock Comprehending monads.
\newblock {\em Mathematical Structures in Computer Science}, 2(4):461--493,
  1992.

\bibitem{DBLP:conf/afp/Wadler95}
P.~Wadler.
\newblock Monads for functional programming.
\newblock In J.~Jeuring and E.~Meijer, editors, {\em Advanced Functional
  Programming}, volume 925 of {\em Lecture Notes in Computer Science}, pages
  24--52. Springer, 1995.

\bibitem{DBLP:conf/popl/WadlerB89}
P.~Wadler and S.~Blott.
\newblock How to make ad-hoc polymorphism less ad-hoc.
\newblock In {\em POPL}, pages 60--76, 1989.

\bibitem{DBLP:conf/ecoop/WehrLT07}
S.~Wehr, R.~L{\"a}mmel, and P.~Thiemann.
\newblock {JavaGI} : Generalized interfaces for {J}ava.
\newblock In E.~Ernst, editor, {\em ECOOP}, volume 4609 of {\em Lecture Notes
  in Computer Science}, pages 347--372. Springer, 2007.

\end{thebibliography}



% \appendix
% % \flushpage
% \section{Typing and Kinding Rules}
% 
% \begin{figure}[hb]
% \begin{small}
%   \scalinadefnstyping 
% \end{small}
%   \caption{Term Classification}
%   \label{fig:termcls}
% \end{figure}
% 
% \begin{figure}
%   \begin{small}
%   \scalinadefnssubtyping
%   \end{small}
%   \caption{Subtyping}
%   \label{fig:subtyping}
% \end{figure}
% 
% 
% \begin{figure}
% \begin{small}
%   \scalinadefnstypeExp
% \end{small}
%   \caption{Type Expansion}
%   \label{fig:typeExp}
% \end{figure}
% 
% 
% \begin{figure}
% \begin{small}
%   \scalinadefnstypeNorm
%   \scalinadefnssubkinding
%   \scalinadefnsmsub
% \end{small}
%   \caption{Type Normalisation, Subkinding, and Subtyping for members}
%   \label{fig:subm}
%   \label{fig:typeNorm}
%   \label{fig:subkinding}
% \end{figure}
% 
% % : hack to avoid spilling into margin
% \begin{figure}
% \hspace{-1em}  \begin{minipage}{\columnwidth}
%  \begin{small}
%   \scalinadefnskinding
%   \end{small}
%   \end{minipage}
%   \caption{Classifying Types}
%   \label{fig:kinding}
% \end{figure}
% 
% 
% % \begin{figure}[p]
% %   \begin{small}
% %   \scalinadefnsmwf
% %   \end{small}
% %   \caption{Well-formedness of members}
% %   \label{fig:mwf}
% % \end{figure}



%\bibliography{final}

\end{document}
