
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<link rel="stylesheet" href="nlp.css" media="all">
<title>NLP12 Assignment 2: Bayesian Curve Fitting, Classification</title>
</head>
 
<BODY> 
 
<h1>Assignment 2</h1>

<b>Shimi Malka 066461641</b>
<br>
<b>Netali Alima 300712742</b>

<h2>Our Solutions: </h2>
<ol>
<li>Polynomial Curve Fitting
   <ol>
   <li><a href="#syntheticdata">Synthetic Dataset Generation</a></li>
   <li><a href="#curvefitting">Polynomial Curve Fitting</a></li>
   <li><a href="#regularization">Polynomial Curve Fitting with Regularization</a></li>
   <li><a href="#q1_4">Probabilistic Regression Framework - Use Bayesian estimation to produce an interval estimation of the function y</a></li>
   </ol>
</li>
<li>Classification for Sentiment Analysis
   <ol>
   <li><a href="#q2_1">Baseline - Bag of words classifier</a></li>
   <li><a href="#q2_2">Data Exploration: Impact of Unknown Words</a></li>
   <li><a href="#q2_3">Improved feature extraction 1: most frequent, stop words</a></li>
   <li><a href="#q2_4">Improved feature extraction 2: exploit part of speech information</a></li>
   <li><a href="#q2_5">Improved feature extraction 3: bigrams</a></li>
   </ol>
</li>
</ol>

<h2>Q1: Polynomial Curve Fitting</h2>

<a name="syntheticdata"></a>
<h3>Q1.1 Synthetic Dataset Generation</h3>
<pre>
import matplotlib.pyplot as plt
import math
import numpy as np

# return a tuple with the 2 vectors x and t
# where the xi values are equi-distant on the [0,1] segment (that is, x1 = 0, x2=1/N-1, x3=2/N-1..., xN = 1.0)
# and t is ti = y(xi) + Normal(mu, sigma)
def generateDataset(N, f, sigma):
    """Sample N equi-spaced points on [0, 1] and noisy targets t_i = f(x_i) + N(0, sigma)."""
    xs = np.linspace(0.0, 1.0, num=N)
    noisy = []
    for xi in xs:
        noisy.append(f(xi) + np.random.normal(0.0, sigma, 1)[0])
    return (xs, noisy)

def makePlot(N, f, x, t, sigma):
    """Scatter the noisy samples (x, t) and overlay the true curve f on [0, 1].

    N is unused here but kept for interface compatibility; sigma only appears
    in the plot title.
    """
    dense_x = np.linspace(0.0, 1.0, num=100)
    dense_y = np.vectorize(f)(dense_x)
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.scatter(x, t, s=50, marker='o', edgecolors='blue', facecolor='none')
    plt.plot(dense_x, dense_y, lw=1, color='g')
    plt.title('sigma = %.2f' % sigma)
    plt.show()
 
def main():
    """Draw N = 50 noisy samples of sin(2*pi*x) with sigma = 0.3 and plot them."""
    sample_count = 50
    noise_sigma = 0.3
    def target(x):
        return math.sin(2 * math.pi * x)
    xs, ts = generateDataset(sample_count, target, noise_sigma)
    makePlot(sample_count, target, xs, ts, noise_sigma)

if __name__ == '__main__':
    main()
</pre>
<img src="q1_1a.PNG" width="600" height="480" />
<img src="q1_1b.PNG" width="600" height="480" />

<a name="curvefitting"></a>
<h3>Q1.2 Polynomial Curve Fitting</h3>
<pre>
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
import math

# return a tuple with the 2 vectors x and t
# where the xi values are equi-distant on the [0,1] segment (that is, x1 = 0, x2=1/N-1, x3=2/N-1..., xN = 1.0)
# and t is ti = y(xi) + Normal(mu, sigma)
def generateDataset(N, f, sigma):
    """Return (x, t): N equi-spaced x's on [0, 1] with t_i = f(x_i) + Gaussian noise N(0, sigma)."""
    grid = np.linspace(0.0, 1.0, num=N)
    targets = []
    for point in grid:
        targets.append(f(point) + np.random.normal(0.0, sigma))
    return (grid, targets)

# y(x) = w0 + w1x + w2x^2 + ... + wMx^M
def Y(w, x):
    """Evaluate the polynomial y(x) = w0 + w1*x + ... + wM*x^M at x."""
    acc = 0
    for power, coeff in enumerate(w):
        acc = acc + coeff * math.pow(x, power)
    return acc

# E(w) = 0.5*sum(y(xi) - ti)^2 = 0.5*sum(sum(wk*xi^k) - ti)^2
def least_squares(w, x, t):
    """Sum-of-squares error E(w) = 0.5 * sum_i (y(x_i) - t_i)^2 for coefficients w."""
    total = 0.0
    for xi, ti in zip(x, t):
        prediction = sum(coeff * math.pow(xi, power) for power, coeff in enumerate(w))
        total += math.pow(prediction - ti, 2)
    return 0.5 * total

# returns the optimal polynomial of degree M that approximates the dataset
# according to the least squares objective (minimize ||A*w - t||^2, where
# A is the N x (M+1) Vandermonde design matrix with A[i, m] = x_i^m)
def OptimizeLS(x, t, M):
    """Least-squares fit of a degree-M polynomial to the dataset (x, t).

    Returns the length-(M+1) coefficient vector [w0, ..., wM].

    Fixed: the original formed W = (A^T A)^-1 A^T t with an explicit matrix
    inverse; for the ill-conditioned Vandermonde matrices that arise at
    higher M this is numerically unstable. np.linalg.lstsq solves the same
    minimization with an SVD-based method.
    """
    design_matrix = np.array([[math.pow(xi, m) for m in np.arange(M + 1)] for xi in x])
    W_LS, _residuals, _rank, _singular_values = np.linalg.lstsq(
        design_matrix, np.asarray(t, dtype=float), rcond=None)
    return W_LS

def makeSinPlot(x, t, f, M):
    """Fit a degree-M least-squares polynomial to (x, t) and plot it (red)
    against the true curve f (green) over the scattered samples."""
    grid = np.linspace(0.0, 1.0, num=100)
    true_curve = np.vectorize(f)(grid)
    figure = plt.figure()
    coeffs = OptimizeLS(x, t, M)
    fitted_curve = [Y(coeffs, g) for g in grid]
    axes = figure.add_subplot(111)
    axes.scatter(x, t, s=50, marker='o', edgecolors='blue', facecolor='none')
    plt.title('M = %d' % M)
    plt.plot(grid, fitted_curve, 'r-')
    plt.plot(grid, true_curve, 'g-')
    plt.show()
    
def main():
    N = 10
    def f(x): return math.sin(2*math.pi*x)
    (x,t) = generateDataset(N, f, 0.03);
    makeSinPlot(x,t,f,1)
    makeSinPlot(x,t,f,3)
    makeSinPlot(x,t,f,5)
    makeSinPlot(x,t,f,9)
    print "E(w1) =  %.8f" % least_squares(OptimizeLS(x, t, 1),x,t)
    print "E(w3) =  %.8f" % least_squares(OptimizeLS(x, t, 3),x,t)
    print "E(w5) =  %.8f" % least_squares(OptimizeLS(x, t, 5),x,t)
    print "E(w9) =  %.8f" % least_squares(OptimizeLS(x, t, 9),x,t)
    
if __name__ == '__main__':
    main()   
</pre>
<img src="q1_2a.PNG" width="600" height="480" />
<img src="q1_2b.PNG" width="600" height="480" />
<img src="q1_2c.PNG" width="600" height="480" />
<img src="q1_2d.PNG" width="600" height="480" />
<br>
Least Squares Error:<br> 
E(w1) =  1.34335743<br>
E(w3) =  0.03495009<br>
E(w5) =  0.00070755<br>
E(w9) =  0.00000015<br>
<b>So M=9 gives the lowest training error, but it is overfitting to sin(2πx).</b>

<a name="regularization"></a>
<h3>Q1.3 Polynomial Curve Fitting with Regularization</h3>
<pre>
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
import math

# returns 3 pairs of vectors of size N each,
# (x-test, t-test), (x-validate, t-validate) and (x-train, t-train).
# The target values are generated with Gaussian noise N(0, sigma).
def  generateDataset3(N, f, sigma):
    """Return ((x_test, t_test), (x_train, t_train), (x_dev, t_dev)):
    three disjoint size-N splits of 3N equi-spaced points on [0, 1],
    with targets t = f(x) + N(0, sigma), shuffled before splitting."""
    xs = np.linspace(0.0, 1.0, num=3 * N)
    samples = [(xi, f(xi) + np.random.normal(0.0, sigma, 1)[0]) for xi in xs]
    np.random.shuffle(samples)
    chunks = [samples[:N], samples[N:2 * N], samples[2 * N:]]
    (xts, tts), (xtr, ttr), (xdv, tdv) = [zip(*chunk) for chunk in chunks]
    return ((xts, tts), (xtr, ttr), (xdv, tdv))

# y(x) = w0 + w1x + w2x^2 + ... + wMx^M
def Y(w, x):
    """Polynomial evaluation: y(x) = sum_k w_k * x^k."""
    value = 0
    for power, coeff in enumerate(w):
        value += coeff * math.pow(x, power)
    return value

# Normalized error of the model:
#   NE(w) = (1/N) * sqrt( sum_i (t_i - y(x_i))^2 )
# NOTE(review): this divides the root-sum-of-squares by N rather than the
# usual RMS sqrt(SSE/N); kept as-is since the report's plots use it.
def normalized_error(w, x, t):
    sse = 0.0
    for xi, ti in zip(x, t):
        prediction = 0.0
        for power, coeff in enumerate(w):
            prediction += coeff * math.pow(xi, power)
        sse += math.pow(ti - prediction, 2)
    return (1.0 / len(t)) * math.pow(sse, 0.5)

# returns the optimal polynomial of degree M that approximates the dataset 
# according the least squares objective
def optimizePLS(x, t, M, lamda):
    """Ridge-regularized least-squares polynomial fit of degree M.

    Solves (A^T A + lamda*I) w = A^T t, where A is the N x (M+1) Vandermonde
    design matrix with A[i, m] = x_i^m. Returns [w0, ..., wM].

    Fixed: solve the regularized normal equations with np.linalg.solve
    instead of forming the explicit matrix inverse, which is both cheaper
    and more numerically stable.
    """
    design_matrix = np.array([[math.pow(xi, m) for m in np.arange(M + 1)] for xi in x])  # This is A
    gram = np.dot(design_matrix.T, design_matrix)              # A^T A
    rhs = np.dot(design_matrix.T, np.asarray(t, dtype=float))  # A^T t
    W_PLS = np.linalg.solve(gram + lamda * np.eye(M + 1), rhs)
    return W_PLS

# selects the best value lambda given a dataset for
# training (xt, tt) and a validation test (xv, tv)
def optimize_PLS(xtr, ttr, xv, tv, M):
    """Select the ridge weights minimizing normalized error on the validation
    set (xv, tv), scanning lambda = e^i for integer i in [-20, 5].

    Fixed: the original seeded the search with a magic sentinel bestNE = 100;
    if every candidate error were >= 100, WPLS was never assigned and the
    return raised UnboundLocalError. Start from infinity instead.
    Returns the best weight vector found (None only if the scan were empty).
    """
    bestNE = float('inf')
    WPLS = None
    for i in np.arange(-20, 6):
        lamda = math.pow(math.e, i)
        w = optimizePLS(xtr, ttr, M, lamda)
        e = normalized_error(w, xv, tv)
        if e < bestNE:
            bestNE = e
            WPLS = w
    return WPLS

def normalized_errorPlot(sets, M):
    """Plot normalized error vs ln(lambda) for a degree-M ridge fit.

    For each lambda = e^i, i in [-20, 5], trains on the train split and plots
    the normalized error measured on the validation split (blue) and on the
    test split (red).

    Fixed: the blue curve is computed on the validation set (xv, tv) but was
    labelled 'Train' in the legend — now labelled 'Validation'. Also passes
    the legend location via the loc keyword (modern matplotlib rejects the
    legacy positional form).
    """
    (xts, tts) = sets[0]  # test split
    (xtr, ttr) = sets[1]  # train split
    (xv, tv) = sets[2]    # validation (dev) split
    log_lambdas = []
    val_errors = []
    test_errors = []
    for i in np.arange(-20, 6):
        log_lambdas.append(i)
        lamda = math.pow(math.e, i)
        w = optimizePLS(xtr, ttr, M, lamda)
        val_errors.append(normalized_error(w, xv, tv))
        test_errors.append(normalized_error(w, xts, tts))
    plt.title('N = %d' % len(tv))
    plt.plot(log_lambdas, val_errors, lw=1, color='blue')
    plt.plot(log_lambdas, test_errors, lw=1, color='red')
    plt.legend(['Validation', 'Test'], loc='upper center', shadow=True)
    plt.xlabel('ln(lambda)')
    plt.ylabel('NE')
    plt.show()
    
def makeSinPlot(sets, f, M):
    """Plot the best validated degree-M ridge fit (red) against the true
    curve f (green), with test/train/dev samples scattered in
    red/blue/green respectively.

    Fixed: the legend location is now passed as the loc keyword; recent
    matplotlib versions reject a third positional argument to legend().
    """
    (xts, tts) = sets[0]  # test split
    (xtr, ttr) = sets[1]  # train split
    (xv, tv) = sets[2]    # validation (dev) split
    grid = np.linspace(0.0, 1.0, num=100)
    true_curve = np.vectorize(f)(grid)
    fig = plt.figure()
    wls = optimize_PLS(xtr, ttr, xv, tv, M)
    fitted = [Y(wls, g) for g in grid]
    ax = fig.add_subplot(111)
    ats = ax.scatter(xts, tts, s=50, marker='o', edgecolors='red', facecolor='none')
    atr = ax.scatter(xtr, ttr, s=50, marker='o', edgecolors='blue', facecolor='none')
    av = ax.scatter(xv, tv, s=50, marker='o', edgecolors='green', facecolor='none')
    plt.plot(grid, fitted, 'r-')
    plt.plot(grid, true_curve, 'g-')
    plt.legend([ats, atr, av], ['Test', 'Train', 'Development'], loc='upper right', shadow=True)
    plt.show()

    
def main():
    """Run the regularized-fit experiment (M = 9) for N = 10 and N = 100."""
    def f(x): return math.sin(2 * math.pi * x)
    for sample_count in (10, 100):
        sets = generateDataset3(sample_count, f, 0.03)
        normalized_errorPlot(sets, 9)
        makeSinPlot(sets, f, 9)

if __name__ == '__main__':
    main()
 </pre>
<img src="q1_3a.PNG" width="600" height="480" />
<img src="q1_3b.PNG" width="600" height="480" />
<img src="q1_3c.PNG" width="600" height="480" />
<img src="q1_3d.PNG" width="600" height="480" />
<br>
<a name="q1_4"></a>
<h3>Q1.4 Probabilistic Regression Framework - Use Bayesian estimation to produce an interval estimation of the function y</h3>

<pre>
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
import math

# return a tuple with the 2 vectors x and t
# where the xi values are equi-distant on the [0,1] segment (that is, x1 = 0, x2=1/N-1, x3=2/N-1..., xN = 1.0)
# and t is ti = y(xi) + Normal(mu, sigma)
def generateDataset(N, f, sigma):
    """N equi-spaced points on [0, 1] paired with noisy targets t_i = f(x_i) + N(0, sigma)."""
    points = np.linspace(0.0, 1.0, num=N)
    observations = []
    for p in points:
        observations.append(f(p) + np.random.normal(0.0, sigma, 1)[0])
    return (points, observations)

# return Q(x) = (Q0(x) ... QM(x))T = (1 x x^2 x^3 ... x^M)^T
def phi(x, M):
    """Return the basis column vector (1, x, x^2, ..., x^M)^T as an (M+1) x 1 np.matrix."""
    powers = [math.pow(x, k) for k in range(M + 1)]
    return np.matrix(powers).T

def bayesianEstimator(X, t, M, alpha, sigma2):
    """Bayesian linear regression with basis phi(x) = (1, x, ..., x^M)^T.

    Given data (X, t), prior precision alpha and noise variance sigma2
    (beta = 1/sigma2), computes S^-1 = alpha*I + beta * sum_n phi(x_n) phi(x_n)^T
    and returns the pair of functions (m, var):
      m(x)   = beta * phi(x)^T * S * sum_n phi(x_n) t_n   (predictive mean)
      var(x) = sigma2 + phi(x)^T S phi(x)                 (predictive variance)

    Performance fix: S * sum_n phi(x_n) t_n does not depend on the query
    point, so it is computed once here instead of being recomputed inside
    every call to m(x).
    """
    I = np.eye(M + 1)
    B = 1.0 / sigma2
    S = np.linalg.inv(alpha * I + B * sum([np.dot(phi(xn, M), phi(xn, M).T) for xn in X]))
    # Loop-invariant vector shared by every m(x) evaluation.
    weighted_sum = sum([np.dot(phi(xn, M), tn) for xn, tn in zip(X, t)])
    Ssum = np.dot(S, weighted_sum)

    def m(x):
        # Predictive mean at x (scalar).
        return (B * np.dot(phi(x, M).T, Ssum)).item(0)

    def var(x):
        # Predictive variance at x: noise variance plus parameter uncertainty.
        phix = phi(x, M)
        return sigma2 + np.dot(np.dot(phix.T, S), phix)

    return (m, var)

def drawPlot(xt, t, f, m, var):
    """Plot the data, the true curve f (green), the predictive mean m (red)
    and a one-standard-deviation predictive band (faint red).

    Bug fixed: the band was drawn as y +/- sqrt(var) around the TRUE function
    y = f(x); the predictive interval belongs around the predictive mean
    ym = m(x), so the band is now ym +/- sqrt(var(x)).
    """
    grid = np.linspace(0.0, 1.0, num=100)
    y = np.vectorize(f)(grid)
    ym = np.vectorize(m)(grid)
    sd = np.array([math.pow(v, 0.5) for v in np.vectorize(var)(grid)])
    band_upper = ym + sd
    band_lower = ym - sd
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xt, t, s=50, marker='o', edgecolors='blue', facecolor='none')
    plt.plot(grid, y, lw=2, color='green')
    plt.plot(grid, ym, lw=2, color='red')
    plt.plot(grid, band_upper, lw=1, color='red', alpha=0.3)
    plt.plot(grid, band_lower, lw=1, color='red', alpha=0.3)
    plt.title('N = %d' % len(t))
    plt.show()

def run(N):
    """Fit the M = 9 Bayesian model to N noisy sin(2*pi*x) samples and plot
    the predictive mean with its interval."""
    def target(x): return math.sin(2 * math.pi * x)
    xs, ts = generateDataset(N, target, 0.03)
    alpha = 0.005
    sigma2 = 1 / 11.1
    mean_fn, var_fn = bayesianEstimator(xs, ts, 9, alpha, sigma2)
    drawPlot(xs, ts, target, mean_fn, var_fn)

def main():
    """Run the Bayesian curve-fitting demo for N = 10 and N = 100."""
    for sample_count in (10, 100):
        run(sample_count)

if __name__ == '__main__':
    main()
</pre>
<img src="q1_4a.PNG" width="600" height="480" />
<img src="q1_4b.PNG" width="600" height="480" />
<br>

<h2>Q2: Classification for Sentiment Analysis</h2>

<a name="q2_1"></a>
<h3>Q2.1: Baseline - Bag of words classifier</h3>
<p>The <code>bag_of_words</code> function:</p>
<pre>
# Presence-only bag-of-words feature extractor: every distinct word that
# appears in the document maps to True (counts are deliberately discarded).
def bag_of_words(document):
    return {word: True for word in document}
</pre> 

<p>The <code>evaluate_features</code> function:</p>
<pre>
    def evaluate_features(self,feature_extractor, N):
        """Train and evaluate a Naive Bayes sentiment classifier on movie_reviews.

        feature_extractor: maps a document's word list to a feature dict.
        N: split ratio -- train receives (N-1)/N of each class, test gets 1/N.
        Prints accuracy, per-class precision/recall/F-measure and the most
        informative features; returns the trained classifier.
        NOTE(review): Python 2 code (print statements).
        """
        self.negative = movie_reviews.fileids('neg') #list of all names of the documents under neg folder
        self.positive = movie_reviews.fileids('pos') #list of all names of the documents under pos folder
        self.maintrain, self.maintest = self.stratifiedSplit(self.negative, self.positive, N)
        lst = []
        trainvocabulary = []
        for doc,lbl in self.maintrain:
            # (feature dict, label) pair for the trainer; collect vocabulary keys
            x = (feature_extractor(movie_reviews.words(fileids=[doc])),lbl)
            lst.append(x)
            trainvocabulary = trainvocabulary + x[0].keys()
        trainvocabulary = set(trainvocabulary)
        # record the training vocabulary size once, at class level
        if q2_1.W == 0:
            q2_1.W = len(trainvocabulary)
        print "no. of features in train:", self.W
        nb = classifier.train(lst)
        self.testClassify = self.classifyTest(self.maintest, nb, feature_extractor)
        print "accuracy = ", accuracy(self.maintest, self.testClassify)
        print "Negative:"
        print "    precision = ", self.calcPrec('neg', self.maintest, self.testClassify)
        print "    recall = ", self.calcRecall('neg', self.maintest, self.testClassify)
        print "    f measure = ", self.calcFMeasur('neg', self.maintest, self.testClassify)
        print "Positive:"
        print "    precision = ", self.calcPrec('pos', self.maintest, self.testClassify)
        print "    recall = ", self.calcRecall('pos', self.maintest, self.testClassify)
        print "    f measure = ", self.calcFMeasur('pos', self.maintest, self.testClassify)
        nb.show_most_informative_features()
        return nb
</pre>
<p>We used some helper functions here (their implementations are at the end of this task):</p>
<ul>
<li>stratifiedSplit: takes the negative and positive document-name lists and N,
and splits the documents so that the training set holds (N-1)/N of the positive and negative documents
and the test set holds the remaining 1/N.</li>
<li>calcPrec, calcRecall, calcFMeasure — taken from the previous assignment for calculating precision, recall and F-measure.</li>
</ul>
<p>Error analysis:</p>
<p>10 features that were involved in many wrong decisions: [',', '.', 'and', 'the', 'to', 'a', 'in', 'is', 'of', 'that']</p>
<p>We think it is better to define the worst errors as the features that appear many times in prediction errors,
rather than the features with the worst odds: a feature with bad odds may still lead to the right label,
whereas these features occur in the documents that we know were classified with the wrong label.</p>
<p>The function that finds the worst errors:</p>
<pre>
	#return a list of document's names that the classifier predicted wrong label
    def error_prediction_docs(self, testSet, testClassify):
        """Return the names of test documents whose predicted label differs
        from the gold label. testSet and testClassify are aligned lists of
        (doc, label) pairs."""
        ans = []
        for real,predict in izip(testSet, testClassify):
            if real != predict:
                realDoc, realLbl = real
                predDoc, predLbl = predict
                # same document, different label -> misclassification
                if predDoc == realDoc and realLbl != predLbl:
                    ans.append(realDoc)
        return ans
          
    #return k worst errors made by the classifier by returning the features that involved in many wrong decisions 
    #using the mainTest and testClassify  class parameters
    def worst_errors_many_wrong_decisions(self, k, feature_extractor):
        """Return the k lower-cased features that occur in the most
        misclassified test documents, most frequent first."""
        worst_errors = []
        features = []
        wrongDocs = self.error_prediction_docs(self.maintest, self.testClassify)
        for doc in wrongDocs:
            feature_dic = feature_extractor(movie_reviews.words(fileids=[doc]))
            features = features + feature_dic.keys()
        fd = FreqDist(feature.lower() for feature in features)
        # repeatedly remove the current maximum to collect the top k
        for i in range(1, k+1):
            x = fd.max()
            fd.pop(x)
            worst_errors.append(x)
        return worst_errors
</pre>
<p>The entire code for this task:</p>
<pre>
from nltk.corpus import movie_reviews
import math
import random
from itertools import izip
from nltk.probability import FreqDist
from nltk.classify import NaiveBayesClassifier as classifier
from nltk.evaluate import accuracy

class q2_1(object):
    """Bag-of-words Naive Bayes sentiment baseline over NLTK's movie_reviews corpus.

    Typical flow: evaluate_features() performs a stratified train/test split,
    trains the classifier and prints the metrics; then
    worst_errors_many_wrong_decisions() analyses the misclassified documents.
    NOTE(review): Python 2 code (print statements, itertools.izip).
    """
    # Class-level state shared by all instances:
    #   i -- 0 until the first stratified split is made; then 1 so later calls
    #        reuse the cached self.maintrain/self.maintest.
    #   W -- vocabulary size of the first training set (recorded once).
    i = 0
    W = 0
    
    #split one list of document names into train and test: move numDocs
    #randomly chosen documents (tagged with label) into train, the rest into test
    def randomChoosDocs(self,documents, numDocs, label):
        train = []
        test = []
        tmpNum = 0
        while tmpNum < numDocs:
            # NOTE(review): int(uniform(1, len-1)) never yields index 0, so the
            # document currently at position 0 can only end up in the test set.
            r = int(random.uniform(1,len(documents)-1))
            train.append((documents.pop(r),label)) # pop: sample without replacement
            tmpNum = tmpNum + 1
        for doc in documents:
            x = (doc,label)
            test.append(x)
        return train,test
    
    #performing the stratified split (training, test) dataset of (positive, negative) documents:
    #train receives (N-1)/N of each class, test the remaining 1/N
    def stratifiedSplit(self,negative, positive, N): 
        if q2_1.i == 0:
            testSet = []
            trainSet = []
            percTrain = float(N-1)/float(N) # percent of how many documents should be in train set from each docs list
            noTrainNeg = math.ceil(percTrain*len(negative))# round up
            noTrainPos = math.ceil(percTrain*len(positive))# round up
            #insert train and test random docs from negative list
            tmptrain, tmptest = self.randomChoosDocs(negative, noTrainNeg,'neg')
            trainSet = trainSet + tmptrain
            testSet = testSet + tmptest
            #insert train and test random docs from positive list
            tmptrain, tmptest = self.randomChoosDocs(positive, noTrainPos,'pos')
            trainSet = trainSet + tmptrain
            testSet = testSet + tmptest
            q2_1.i = 1 # later calls take the cached-split branch below
            return trainSet, testSet
        else:
            # NOTE(review): assumes evaluate_features already stored
            # self.maintrain/self.maintest on this instance; fails otherwise.
            return self.maintrain, self.maintest
    
    #return the test docs with the lables the classifier gives after training
    def classifyTest(self,test, classifier, feature_extractor):
        testClassifies = []
        for doc,lbl in test:
            # the gold label lbl is ignored here; we keep (doc, predicted label)
            tmpLbl = classifier.classify(feature_extractor(movie_reviews.words(fileids=[doc])))
            x = (doc,tmpLbl)
            testClassifies.append(x)
        return testClassifies
    
    #calc TP (label both in the test and by the classifier)
    def calcTP(self,label, testSet, classifierTest):
        tp = 0
        for x, y in izip(testSet, classifierTest):
            doc,lbl = x
            # x == y compares the whole (doc, label) pair at once
            if x == y and lbl == label :
                tp += 1
        return tp
    
    #calc TN (non-label both in the test and by the classifier)    
    def calcTN(self,label, testSet, classifierTest):
        tn = 0
        for x, y in izip(testSet, classifierTest):
            testDoc,testLbl = x
            classifierDoc,classifierLbl = y
            if testDoc == classifierDoc and testLbl != label and classifierLbl != label :
                tn += 1
        return tn
        
    #calc FP (non-label by the test and label by the classifier)
    def calcFP(self,label, testSet, classifierTest):
        fp = 0
        for x, y in izip(testSet, classifierTest):
            testDoc,testLbl= x
            classifierDoc,classifierLbl = y
            if testDoc == classifierDoc and testLbl != label and classifierLbl == label :
                fp += 1
        return fp
    
    #calc FN (label by the test and non label by the classifier)
    def calcFN(self,label, TestSet, classifierTest):
        fn = 0
        for x, y in izip(TestSet, classifierTest):
            testDoc,testLbl = x
            classifierDoc,classifierLbl = y
            if testDoc == classifierDoc and testLbl == label and classifierLbl != label :
                fn += 1
        return fn
        
    #calc Precision(T) = TP / (TP + FP); defined as 0 when the denominator is 0
    def calcPrec(self,label, testSet, classifierTest):
        tp = self.calcTP(label, testSet, classifierTest)
        fp = self.calcFP(label, testSet, classifierTest)
        if tp+fp == 0:
            prec = 0
        else:
            prec = float(float(tp)/(tp+fp))
        return prec
    
    #calc Recall(T) = TP / (TP + FN); defined as 0 when the denominator is 0
    def calcRecall(self,label, testSet, classifierTest):
        tp = self.calcTP(label, testSet, classifierTest)
        fn = self.calcFN(label, testSet, classifierTest)
        if tp + fn == 0:
            recall = 0
        else:
            recall = float(float(tp)/(tp+fn))
        return recall
        
    #calc F-Measure(T) = 2 x Precision x Recall / (Recall + Precision); 0 when both are 0
    def calcFMeasur(self,label, testSet, classifierTest):
        prec = self.calcPrec(label, testSet, classifierTest)
        recall = self.calcRecall(label, testSet, classifierTest)
        if recall + prec == 0:
            fMeasure = 0
        else:
            fMeasure = float((2 * prec * recall)/(recall + prec))
        return fMeasure
            
    #train a Naive Bayes classifier on the stratified split, print accuracy,
    #per-class precision/recall/F-measure and the most informative features,
    #and return the trained classifier
    def evaluate_features(self,feature_extractor, N):
        self.negative = movie_reviews.fileids('neg') #list of all names of the documents under neg folder
        self.positive = movie_reviews.fileids('pos') #list of all names of the documents under pos folder
        self.maintrain, self.maintest = self.stratifiedSplit(self.negative, self.positive, N)
        lst = []
        trainvocabulary = []
        for doc,lbl in self.maintrain:
            # (feature dict, label) pair for the trainer; collect vocabulary keys
            x = (feature_extractor(movie_reviews.words(fileids=[doc])),lbl)
            lst.append(x)
            trainvocabulary = trainvocabulary + x[0].keys()
        trainvocabulary = set(trainvocabulary)
        # record the training vocabulary size once, at class level
        if q2_1.W == 0:
            q2_1.W = len(trainvocabulary)
        print "no. of features in train:", self.W
        nb = classifier.train(lst)
        self.testClassify = self.classifyTest(self.maintest, nb, feature_extractor)
        print "accuracy = ", accuracy(self.maintest, self.testClassify)
        print "Negative:"
        print "    precision = ", self.calcPrec('neg', self.maintest, self.testClassify)
        print "    recall = ", self.calcRecall('neg', self.maintest, self.testClassify)
        print "    f measure = ", self.calcFMeasur('neg', self.maintest, self.testClassify)
        print "Positive:"
        print "    precision = ", self.calcPrec('pos', self.maintest, self.testClassify)
        print "    recall = ", self.calcRecall('pos', self.maintest, self.testClassify)
        print "    f measure = ", self.calcFMeasur('pos', self.maintest, self.testClassify)
        nb.show_most_informative_features()
        return nb
    
    #return a list of document's names that the classifier predicted wrong label
    def error_prediction_docs(self, testSet, testClassify):
        ans = []
        for real,predict in izip(testSet, testClassify):
            if real != predict:
                realDoc, realLbl = real
                predDoc, predLbl = predict
                # same document, different label -> misclassification
                if predDoc == realDoc and realLbl != predLbl:
                    ans.append(realDoc)
        return ans
          
    #return k worst errors made by the classifier by returning the features that involved in many wrong decisions 
    #using the mainTest and testClassify class parameters
    def worst_errors_many_wrong_decisions(self, k, feature_extractor):
        worst_errors = []
        features = []
        wrongDocs = self.error_prediction_docs(self.maintest, self.testClassify)
        for doc in wrongDocs:
            feature_dic = feature_extractor(movie_reviews.words(fileids=[doc]))
            features = features + feature_dic.keys()
        fd = FreqDist(feature.lower() for feature in features)
        # repeatedly remove the current maximum to collect the top k
        for i in range(1, k+1):
            x = fd.max()
            fd.pop(x)
            worst_errors.append(x)
        return worst_errors
    
# Feature extractor: map every distinct word that appears in the document
# to True (presence-only bag of words).
def bag_of_words(document):
    return dict.fromkeys(document, True)
        
def main():
    s = q2_1()
    nbClassifier = s.evaluate_features(bag_of_words,2.0)
    #k features that involved in many wrong decisions
    errors = s.worst_errors_many_wrong_decisions(10, bag_of_words)
    print "10 features that involved in many wrong decisions: ", errors
    
if __name__ == '__main__':
    main() 
</pre>
<p>The output for this code is:</p>
<pre>
accuracy =  0.642
Negative:
    precision =  0.986301369863
    recall =  0.288
    f measure =  0.445820433437
Positive:
    precision =  0.583138173302
    recall =  0.996
    f measure =  0.735598227474
Most Informative Features
              whatsoever = True              neg : pos    =     15.7 : 1.0
                  finest = True              pos : neg    =     13.0 : 1.0
             outstanding = True              pos : neg    =     11.8 : 1.0
                poignant = True              pos : neg    =     11.0 : 1.0
            embarrassing = True              neg : pos    =      9.8 : 1.0
                   shine = True              pos : neg    =      9.7 : 1.0
               ludicrous = True              neg : pos    =      9.0 : 1.0
                  forgot = True              neg : pos    =      8.3 : 1.0
                 noticed = True              pos : neg    =      8.3 : 1.0
                cultural = True              pos : neg    =      8.3 : 1.0
10 features that involved in many wrong decisions:  [',', '.', 'and', 'the', 'to', 'a', 'in', 'is', 'of', 'that']
</pre>
<a name="q2_2"></a>
<h3>Q2.2: Data Exploration: Impact of Unknown Words</h3>
<p>The code for this task:</p>
<pre>
from nltk.corpus import movie_reviews
from q2_1 import q2_1, bag_of_words
import pylab
from nltk.evaluate import accuracy as _accuracy
from itertools import izip

class q2_2(object):
    """Data exploration: impact of unknown (out-of-vocabulary) words on the
    q2_1 bag-of-words baseline. Buckets the test documents by unknown-word
    rate and plots per-bucket composition, accuracy, precision and recall.
    NOTE(review): Python 2 code; relies on nltk and pylab.
    """
    #constructing classifier for N=2 using task 1 and return the classifier and the train and test sets for this classifier
    def build_Classifier_Train_Test(self, feature_extractor):
        self.q21 = q2_1()
        classifier = self.q21.evaluate_features(feature_extractor, 2)
        train = self.q21.maintrain
        test = self.q21.maintest
        testClaassify = self.q21.testClassify
        return classifier,train,test, testClaassify
    
    #given a document name and a list of words that appear in the train set,
    #return the ratio of the document's distinct unseen word types to its token count
    def doc_unknown_words(self, docName, trainWords):
        docWords = movie_reviews.words(fileids=[docName])
        diffs = list(set(docWords) - set(trainWords))
        return float(len(diffs))/float(len(docWords))
    
    #return list of pairs (doc name, unknown words perc) for each document in test
    def test_unknown_words(self, test, train):
        ans = []
        trainWords = []
        for doc,lbl in train:    
            tmp = movie_reviews.words(fileids=[doc])
            trainWords = trainWords + list(tmp)
        for doc,lbl in test:
            perc = self.doc_unknown_words(doc, trainWords)
            p = (doc,perc)
            ans.append(p)
        return ans
    
    #divide the test documents into 5 groups according to the unknown-word rate:
    #[0, 0.01) group 1, [0.01, 0.02) group 2, [0.02, 0.03) group 3,
    #[0.03, 0.04) group 4, >= 0.04 group 5
    #(NOTE(review): the original comment claimed 0.2/0.4/0.6/0.8 cut-offs,
    #which did not match the thresholds actually used below)
    def divide_test(self,test,train):
        grp1 = []
        grp2 = []
        grp3 = []
        grp4 = []
        grp5 = []
        docsPerc = self.test_unknown_words(test, train)
        self.plot_percentage_of_unknownWords(docsPerc) 
        for doc,perc in docsPerc:
            if perc < 0.01:
                grp1.append(doc)
            else:
                if perc >= 0.01 and perc < 0.02:
                    grp2.append(doc)
                else:
                    if perc >= 0.02 and perc < 0.03:
                        grp3.append(doc)
                    else:
                        if perc >= 0.03 and perc < 0.04:
                            grp4.append(doc)
                        else:
                            grp5.append(doc)
        return [grp1,grp2,grp3,grp4,grp5]
    
    #plot size of each group as a bar chart
    def plotSizes(self,groups):
        x = [1,2,3,4,5]
        y = [len(n) for n in groups]
        pylab.title('Size for each Group')
        pylab.xlabel('Groups')
        pylab.ylabel('Size')
        pylab.bar(x, y, width=0.1, facecolor='blue', align='center')
        pylab.grid(False)
        pylab.show()
        return
    
    #used for identifying the 5 groups of documents
    #NOTE(review): hard-codes 1000 test documents (arange(1000)); breaks if
    #the test set has a different size
    def plot_percentage_of_unknownWords(self,docsPerc):
        grps = pylab.arange(1000)
        lens = [b for a,b in docsPerc]
        pylab.plot(grps, lens)
        pylab.title('Percentage of Unknown Words')
        pylab.xlabel('Docs')
        pylab.ylabel('Percentage')
        pylab.grid(False)
        pylab.show()
        return
    
    #return the fraction of positive and of negative docs in the group,
    #detecting positives by the "pos/" prefix in the corpus file id
    #NOTE(review): an empty group raises ZeroDivisionError here
    def count_pos_neg(self,group):
        pos = 0
        for doc in group:
            if "pos/" in doc:
                pos = pos + 1
        pos = float(pos)/float(len(group))
        neg = 1.0 - pos
        return pos,neg
    
    #bar-plot the positive and negative fractions of each group
    def plot_positive_negative_relative_no(self, groups):
        x = [1,2,3,4,5]
        y_pos = []
        y_neg = []
        for g in groups:
            pos,neg = self.count_pos_neg(g)
            y_pos.append(pos)
            y_neg.append(neg)
        #positive
        pylab.title('Percentage of Positive Documents')
        pylab.xlabel('Groups')
        pylab.ylabel('Percentage - Positive')
        pylab.bar(x, y_pos, width=0.1, facecolor='blue', align='center')
        pylab.grid(False)
        pylab.show()
        #negative
        pylab.title('Percentage of Negative Documents')
        pylab.xlabel('Groups')
        pylab.ylabel('Percentage - Negative')
        pylab.bar(x, y_neg, width=0.1, facecolor='blue', align='center')
        pylab.grid(False)
        pylab.show()
        return

    # gets list of lists of file's names belonging to 5 groups, the test set and the test classify by the classifier
    # and calculate accuracy, precision, recall and f-measure for each group;
    # returns five parallel lists (one entry per group)
    def calculateAccuracyPrecRecall(self, groups, test, testClassify):
        accuracy = []
        precPos = []
        precNeg = []
        recallPos = []
        recallNeg = []
        groupTest = []
        groupTestClassify = []
        for group in groups:
            # collect the (gold, predicted) pairs restricted to this group
            for doc in group:
                for element1, element2 in izip(test, testClassify):
                    tdoc,tlbl = element1
                    if tdoc == doc:
                        groupTest.append(element1)
                        groupTestClassify.append(element2)
            p = self.q21.calcPrec('pos',groupTest,groupTestClassify)
            precPos.append(p)
            p = self.q21.calcPrec('neg',groupTest,groupTestClassify)
            precNeg.append(p)
            r = self.q21.calcRecall('pos', groupTest, groupTestClassify)
            recallPos.append(r)
            r = self.q21.calcRecall('neg', groupTest,groupTestClassify)
            recallNeg.append(r)
            a = _accuracy(groupTest, groupTestClassify)
            accuracy.append(a)
            groupTest = []
            groupTestClassify = []
        return precPos, precNeg, recallPos, recallNeg, accuracy
    
    #plot one metric (y, one value per group) as a bar chart
    def plot_calculations(self,y, ylbl, strTitle):
        x = [1,2,3,4,5]
        pylab.bar(x, y, width=0.1, facecolor='blue', align='center')
        pylab.xlabel('Groups')
        pylab.ylabel(ylbl)
        pylab.title(strTitle)
        pylab.grid(False)
        pylab.show()
        return
    
def main():
    # Build the classifier and train/test split, divide the test set into
    # 5 groups by unknown-word percentage, then plot group sizes, class
    # balance, and the per-group evaluation metrics.
    q22 = q2_2()
    c, train, test, testClassify = q22.build_Classifier_Train_Test(bag_of_words)
    groups = q22.divide_test(test, train)
    q22.plotSizes(groups)
    q22.plot_positive_negative_relative_no(groups)
    precPos, precNeg, recallPos, recallNeg, accuracy = q22.calculateAccuracyPrecRecall(groups, test, testClassify)
    # One chart per metric, in the same order as before.
    metrics = [(precPos, 'Precision', 'Positive Precision for each Group'),
               (precNeg, 'Precision', 'Negative Precision for each Group'),
               (recallPos, 'Recall', 'Positive Recall for each Group'),
               (recallNeg, 'Recall', 'Negative Recall for each Group'),
               (accuracy, 'accuracy', 'Accuracy for each Group')]
    for values, ylbl, title in metrics:
        q22.plot_calculations(values, ylbl, title)

if __name__ == '__main__':
    main()
</pre>
Percentage of Unknown Words:<br>
<img src="q2_1PercUnknown.png" width="600" height="480" alt="Percentage of unknown words per document"><br>
We decided to divide the Test Set into 5 groups according to the graph above:<br>
<ul>
<li>percentage &lt; 0.01 -&gt; group 1</li>
<li>percentage &gt;= 0.01 and percentage &lt; 0.02 -&gt; group 2</li>
<li>percentage &gt;= 0.02 and percentage &lt; 0.03 -&gt; group 3</li>
<li>percentage &gt;= 0.03 and percentage &lt; 0.04 -&gt; group 4</li>
<li>percentage &gt;= 0.04 -&gt; group 5</li>
</ul>
Size of Each group:</br>
<img src="q2_1Size.png" width="600" height="480" />
</br>
Percentage of Positive Documents in each group:</br>
<img src="q2_1PercPos.png" width="600" height="480" />
</br>
Percentage of Negative Documents in each group:</br>
<img src="q2_1PercNeg.png" width="600" height="480" />
</br>
Accuracy for each group:</br>
<img src="q2_1Acc.png" width="600" height="480" />
</br>
Negative Precision for each group:</br>
<img src="q2_1PrecNeg.png" width="600" height="480" />
</br>
Positive Precision for each group:</br>
<img src="q2_1PrecPos.png" width="600" height="480" />
</br>
Negative Recall for each group:</br>
<img src="q2_1RecNeg.png" width="600" height="480" />
</br>
Positive Recall for each group:</br>
<img src="q2_1RecPos.png" width="600" height="480" />
</br>


<a name="q2_3"></a>
<h3>Q2.3: Improved feature extraction 1: most frequent, stop words</h3>
The entire Code for this task:</br>
<pre>
from nltk.corpus import movie_reviews
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from q2_1 import q2_1, bag_of_words
from itertools import izip
from nltk.evaluate import accuracy
import pylab

# English stop words used to filter extracted features.
stopset = set(stopwords.words('english'))

# Drop stop words from `words`, mapping each kept word to the feature value True.
def stopword_remover(words):
    return dict((word, True) for word in words if word not in stopset)

# Build a feature extractor that keeps only the K most frequent corpus words
# and then strips stop words from the result.
#
# NOTE(review): the `stopwords` parameter is unused (and shadows the imported
# nltk.corpus.stopwords module); filtering actually relies on the module-level
# `stopset`. Kept for interface compatibility.
def make_topK_non_stopword_extractor(K, stopwords):
    # Count for each (lowercased) word in the data set how many times it appears.
    words = movie_reviews.words()
    fd = FreqDist(word.lower() for word in words)
    # Take the K most frequent words (FreqDist iterates in decreasing
    # frequency order); stop early if the vocabulary has fewer than K words.
    topWords = []
    fd = fd.__iter__()
    while len(topWords) < K:
        try:
            topWords.append(fd.next())
        except StopIteration:
            break
    # Use a set for O(1) membership tests instead of scanning a K-long list
    # for every token of every document.
    topWords = set(topWords)
    # NOTE(review): frequencies were counted on lowercased words but document
    # tokens are matched as-is, so mixed-case tokens would be dropped --
    # confirm the movie_reviews tokens are already lowercase.
    def extractor(document):
        kept = [word for word in document if word in topWords]
        # Remove stop words and return the filtered feature dict.
        return stopword_remover(kept)
    return extractor


#define 5 steps for K or less: once the step index x reaches 5, or K*x would
#exceed the number of features in the train set (q21.W), clamp to q21.W.
def kVal(q21, x, K):
    if x >= 5 or x * K > q21.W:
        return q21.W
    return K * x

#crating graph of accuracy vs. K/W
def plotGraph(q21, K):
    x = []
    y = []
    for i in range(1,6):
        newK = kVal(q21, i, K)
        extractor = make_topK_non_stopword_extractor(newK, stopset)
        print "top K without stops words, K = ", newK, ":"
        classifier = q21.evaluate_features(extractor, 10)
        x.append(float(newK)/float(q21.W))
        acc = accuracy(q21.maintest, q21.testClassify)
        y.append(acc)
    pylab.bar(x, y, width=0.02, facecolor='blue', align='center')
    pylab.xlabel('K/W')
    pylab.ylabel("Accuracy")
    pylab.title("Accuracy for each K/W value")
    pylab.grid(False)
    pylab.show()
    return

# Count the documents whose label flipped after changing the feature
# extractor; returns (newly-positive count, newly-negative count).
def newTaggs(oldClassiffy, newClassify):
    noPos = 0
    noNeg = 0
    for old, new in izip(oldClassiffy, newClassify):
        # Same document, different label => the classifier changed its mind.
        if old[0] == new[0] and old[1] != new[1]:
            if new[1] == 'neg':
                noNeg += 1
            else:
                noPos += 1
    return noPos, noNeg

def main():
    # Compare the baseline bag-of-words extractor with the top-K
    # non-stop-word extractor on the same train/test split, report which
    # documents changed label, then plot accuracy vs. K/W.
    q21 = q2_1()
    print "bag of words extractor:"
    firstClassifier = q21.evaluate_features(bag_of_words, 10)
    # Save the baseline predictions before the second evaluation overwrites them.
    oldClassify = q21.testClassify
    print "top k frequent words without stop words extractor:"
    extractor = make_topK_non_stopword_extractor(10000, stopset)
    secondClassifier = q21.evaluate_features(extractor, 10)
    newClassify = q21.testClassify
    #identifying documents that classified differently and report new positive, new negative
    noPos, noNeg = newTaggs(oldClassify, newClassify)
    print "No of documents that the classifier classify them as pos in bag of words extractor and neg in top k extractor is:", noNeg
    print "No of documents that the classifier classify them as neg in bag of words extractor and pos in top k extractor is:", noPos
    #drawing plot of accuracy vs. K/W
    plotGraph(q21, 5000)
    return

if __name__ == '__main__':
    main() 
</pre>

Compare the behavior of this new feature extractor with the baseline bag of words:</br>
<li>bag of words extractor:<br/>
accuracy =  0.755<br/>
Negative:<br/>
<table>
<tr><td/><td>precision</td><td>=</td><td>0.963636363636</td></tr>
<tr><td/><td>recall</td><td>=</td><td>0.53</td></tr>
<tr><td/><td>f measure</td><td>=</td><td>0.683870967742</td></tr>
</table>
Positive:<br/>
<table>
<tr><td/><td>precision</td><td>=</td><td>0.675862068966</td></tr>
<tr><td/><td>recall</td><td>=</td><td>0.98</td></tr>
<tr><td/><td>f measure</td><td>=</td><td>0.8</td></tr>
</table>
Most Informative Features:<br/>
<table>
<tr><td/><td>ludicrous = True</td><td>neg : pos</td><td>=</td><td>21.7 : 1.0</td></tr>
<tr><td/><td>sucks = True</td><td>neg : pos</td><td>=</td><td>16.3 : 1.0</td></tr>
<tr><td/><td>fascination = True</td><td>pos : neg</td><td>=</td><td>11.0 : 1.0</td></tr>
<tr><td/><td>slip = True</td><td>pos : neg</td><td>=</td><td>11.0 : 1.0</td></tr>
<tr><td/><td>stupidity = True</td><td>neg : pos</td><td>=</td><td>10.6 : 1.0</td></tr>
<tr><td/><td>poignant = True</td><td>pos : neg</td><td>=</td><td>10.6 : 1.0</td></tr>
<tr><td/><td>hudson = True</td><td>neg : pos</td><td>=</td><td>10.3 : 1.0</td></tr>
<tr><td/><td>3000 = True</td><td>neg : pos</td><td>=</td><td>10.3 : 1.0</td></tr>
<tr><td/><td>captures = True</td><td>pos : neg</td><td>=</td><td>10.2 : 1.0</td></tr>
<tr><td/><td>outstanding = True</td><td>pos : neg</td><td>=</td><td>10.1 : 1.0</td></tr>
</table>
<br/>
<li>top k frequent words without stop words extractor:<br/>
accuracy =  0.775<br/>
Negative:<br/>
<table>
<tr><td/><td>precision</td><td>=</td><td>0.982456140351</td></tr>
<tr><td/><td>recall</td><td>=</td><td>0.56</td></tr>
<tr><td/><td>f measure</td><td>=</td><td>0.713375796178</td></tr>
</table>
Positive:<br/>
<table>
<tr><td/><td>precision</td><td>=</td><td>0.692307692308</td></tr>
<tr><td/><td>recall</td><td>=</td><td>0.99</td></tr>
<tr><td/><td>f measure</td><td>=</td><td>0.814814814815</td></tr>
</table>
Most Informative Features:<br/>
<table>
<tr><td/><td>ludicrous = True</td><td>neg : pos</td><td>=</td><td>23.0 : 1.0</td></tr>
<tr><td/><td>effortlessly = True</td><td>pos : neg</td><td>=</td><td>13.0 : 1.0</td></tr>
<tr><td/><td>avoids = True</td><td>pos : neg</td><td>=</td><td>12.3 : 1.0</td></tr>
<tr><td/><td>outstanding = True</td><td>pos : neg</td><td>=</td><td>11.9 : 1.0</td></tr>
<tr><td/><td>astounding = True</td><td>pos : neg</td><td>=</td><td>10.3 : 1.0</td></tr>
<tr><td/><td>fascination = True</td><td>pos : neg</td><td>=</td><td>10.3 : 1.0</td></tr>
<tr><td/><td>hudson = True</td><td>neg : pos</td><td>=</td><td>10.3 : 1.0</td></tr>
<tr><td/><td>sucks = True</td><td>neg : pos</td><td>=</td><td>10.2 : 1.0</td></tr>
<tr><td/><td>unoriginal = True</td><td>neg : pos</td><td>=</td><td>9.7 : 1.0</td></tr>
<tr><td/><td>conveys = True</td><td>pos : neg</td><td>=</td><td>9.7 : 1.0</td></tr>
</table>
</br>
We can see that the new feature extractor has better results than the baseline feature extractor.
The most informative features of the new extractor also include words that imply the correct label,
such as "astounding", whose meaning suggests a positive review rather than a negative one,
whereas the baseline extractor includes "3000", which does not imply any label.<br>
<br/>
Optimizing K:</br>
W = 38089</br>
K = [5000,10000,15000,20000,38089]</br>
plot of accuracy vs. K/W.</br>
<img src="q2_3.png" width="600" height="480" />
</br>
We can see here that when K is small (5000) or big (20,000, 38089) we get less accuracy,
and when K is somewhere in the middle we get higher accuracy.<br>
The conclusion is that K shouldn't be very high or very low; it should be some value in the middle.<br>
</br>
Identify documents which are classified differently by the 2 classifiers and report new_positives and new_negatives (K=10,000):<br>
<li> New Negative: No of documents that the classifier classify them as pos in bag of words extractor and neg in top k extractor is: 6</br>
<li> New Positive: No of documents that the classifier classify them as neg in bag of words extractor and pos in top k extractor is: 4</br>
</br>
Conclusion:<br>
We can see that the new extractor, in some cases (with the right K value), improves the classification process and achieves better accuracy.
<a name="q2_4"></a>
<h3>Q2.4: Improved feature extraction 2: exploit part of speech information</h3>
Code for this task:</br>
<pre>
import  nltk
from nltk.evaluate import accuracy
from nltk.corpus import brown
from q2_1 import q2_1
import pylab

#build a POS tagger trained on the Brown corpus (simplified tag set)
def getTrainedTagger():
    train = brown.tagged_sents(simplify_tags=True)
    # Lowercase every training token so tagging matches lowercased input.
    newTrain = [[(word.lower(), tag) for word, tag in sen] for sen in train]
    # Backoff chain: bigram -> unigram -> affix -> regexp -> default 'NN'.
    # (The regexp tagger ends with a catch-all '.*' -> NN rule, so the
    # DefaultTagger backoff is effectively never reached.)
    nn_tagger = nltk.DefaultTagger('NN')
    regexp_tagger = nltk.RegexpTagger([(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),   # cardinal numbers
                                       (r'(The|the|A|a|An|an)$', 'AT'),   # articles
                                       (r'.*able$', 'JJ'),                # adjectives
                                       (r'.*ness$', 'NN'),                # nouns formed from adjectives
                                       (r'.*ly$', 'RB'),                  # adverbs
                                       (r'.*s$', 'NNS'),                  # plural nouns
                                       (r'.*ing$', 'VBG'),                # gerunds
                                       (r'.*ed$', 'VBD'),                 # past tense verbs
                                       (r'.*', 'NN')                      # nouns (default)
                                       ], backoff=nn_tagger)
    affixTagger = nltk.AffixTagger(newTrain, backoff=regexp_tagger)
    unigramTagger = nltk.UnigramTagger(newTrain, backoff=affixTagger)
    return nltk.NgramTagger(2, newTrain, backoff=unigramTagger)

# Build an extractor that keeps only the words whose predicted POS tag is
# in the given `pos` collection.
def make_pos_extractor(pos):
    tagger = getTrainedTagger()
    wanted = set(pos)
    def extractor(words):
        taggedWords = tagger.tag(words)
        return dict((word, True) for word, tag in taggedWords if tag in wanted)
    return extractor
#['ADJ', 'ADV', 'CNJ', 'DET', 'EX', 'FW', 'MOD', 'N', 'NP', 'NUM', 'PRO', 'P', 'TO', 'UH', 
#'V', 'VD', 'VG', 'VN', 'WH']
def main():
    q21 = q2_1()
    x = []
    y = []
    pos = ['N', 'VG', 'ADJ', 'ADV']
    print pos
    extractor = make_pos_extractor(pos)
    classifier = q21.evaluate_features(extractor, 10)
    x.append(1)
    acc = accuracy(q21.maintest, q21.testClassify)
    y.append(acc)
    
    pos = ['N', 'V', 'VG', 'VN', 'VN', 'ADJ', 'ADV']
    print pos
    extractor = make_pos_extractor(pos)
    classifier = q21.evaluate_features(extractor, 10)
    x.append(2)
    acc = accuracy(q21.maintest, q21.testClassify)
    y.append(acc)
    
    pos = ['V', 'ADJ', 'ADV']
    print pos
    extractor = make_pos_extractor(pos)
    classifier = q21.evaluate_features(extractor, 10)
    x.append(3)
    acc = accuracy(q21.maintest, q21.testClassify)
    y.append(acc)
    
    pos = ['ADJ', 'ADV']
    print pos
    extractor = make_pos_extractor(pos)
    classifier = q21.evaluate_features(extractor, 10)
    x.append(4)
    acc = accuracy(q21.maintest, q21.testClassify)
    y.append(acc)
    
    pos = ['N', 'ADJ', 'ADV']
    print pos
    extractor = make_pos_extractor(pos)
    classifier = q21.evaluate_features(extractor, 10)
    x.append(5)
    acc = accuracy(q21.maintest, q21.testClassify)
    y.append(acc)
    
    pylab.bar(x, y, width=0.02, facecolor='blue', align='center')
    pylab.xlabel('POS')
    pylab.ylabel("Accuracy")
    pylab.title("Accuracy for each pos set")
    pylab.grid(False)
    pylab.show()
    return
    
if __name__ == '__main__':
    main() 
</pre>
</br>
Graph accuracy vs. POS tags:</br>
<img src="q2_4.png" width="815" height="655" />
</br>
In the graph we can see that the best POS tags set for filtering features we came up with is: ['V', 'ADJ', 'ADV']</br>

<a name="q2_5"></a>
<h3>Q2.5: Improved feature extraction 3: bigrams</h3>
In this task we want to compare 4 kinds of feature extractors:</br>
<li>Bag Of Words Extractor
<li>Bigram Extractor
<li>All Bigram and Unigram Extractor
<li>Good Bigram and Unigram Extractor
</br></br>
Code For this task:
<pre>
from nltk.util import bigrams
from q2_1 import q2_1,bag_of_words
import itertools
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures

score = BigramAssocMeasures.chi_sq  # chi square measure of strength

# Return the input words followed by the n highest-scoring bigram
# collocations found in them.
def strong_bigrams(words, score_fn, n):
    bigram_finder = BigramCollocationFinder.from_words(words)
    best = bigram_finder.nbest(score_fn, n)
    return list(itertools.chain(words, best))

# Build an extractor whose features are the document's adjacent word pairs,
# each encoded as a "word1 word2" string.
def make_bigram_extractor():
    def extractor(words):
        pairs = bigrams(words)
        return dict((first + " " + second, True) for first, second in pairs)
    return extractor

# Build an extractor whose features are all unigrams plus all bigrams
# (bigrams encoded as "word1 word2" strings).
def make_bigram_unigram_extractor():
    def extractor(words):
        biWords = [first + " " + second for first, second in bigrams(words)]
        features = dict((word, True) for word in words)
        for bi in biWords:
            features[bi] = True
        return features
    return extractor

# Build an extractor combining the unigrams with the n "strongest" bigrams
# (ranked by the chi-square collocation score).
def make_good_bigram_unigram_extractor(n):
    def extractor(words):
        # NOTE(review): strong_bigrams is given the already-joined bigram
        # strings rather than the raw word sequence, so the collocation
        # finder scores pairs *of bigram strings* (and those tuples end up
        # as feature keys). Presumably strong_bigrams(words, score, n) was
        # intended -- confirm against the reported results before changing.
        stBigrams = strong_bigrams(([word1 + " " + word2 for word1,word2 in bigrams(words)]), score , n)
        words = list(words) + list(stBigrams)
        return dict([(word,True) for word in words])
    return extractor

#when we run the same instance of evaluate_features few times, in the first time we define the train and test sets
#and in the other times we based on the same train and test set so we could be able to compare those methods    
def main():
    q21 = q2_1()
    
    print "bigram Extractor:"
    #evaluate bigram extractor
    extractor = make_bigram_extractor()
    classifier = q21.evaluate_features(extractor, 4)
    
    print "bag of words extractor:"
    #evaluate bag of words extractor
    extractor = bag_of_words
    classifier = q21.evaluate_features(extractor, 4)
    
    print "all bigram unigram extractor"
    #evaluate all bigrams and all unigrams extractor
    extractor = make_bigram_unigram_extractor()
    classifier = q21.evaluate_features(extractor, 4)
    
    print "good bigram unigram extractor:"
    #evaluate good bigrams and all unigrams extractor
    extractor = make_good_bigram_unigram_extractor(100)
    classifier = q21.evaluate_features(extractor, 4)
    
    return

if __name__ == '__main__':
    main() 
</pre>
</br>
The next table demonstrates the comparison between these 4 extractors:<br>
<img src="q2_5.png"/>
</br>
The table shows that the last extractor (good Bigram and Unigram) is the most accurate,
but still the improvement from All Bigram extractor to good Bigram extractor isn't drastic.

<BR> 
<HR>
 <br>
</BODY>

