#coding:utf8
import pickle
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from nltk.classify.scikitlearn import SklearnClassifier
from analytical.cut_give import neg_features, pos_features
from analytical.feature_se import find_best_words, create_word_bigram_scores,\
    create_bigram_scores, create_word_scores
from nltk.collocations import BigramCollocationFinder
from nltk.metrics.association import BigramAssocMeasures
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.svm.classes import SVC, LinearSVC, NuSVC
from analytical.feature_ex import bag_of_words, bigram, bigram_words
import os


def save_classifier(pos_filename, neg_filename, best_fea, best_cla_name, best_dis):
    """Train the winning classifier on the full corpus and pickle it to disk.

    Parameters
    ----------
    pos_filename, neg_filename
        Paths to the positive / negative corpora, consumed by the project's
        ``pos_features`` / ``neg_features`` helpers.
    best_fea
        Sequence whose first element names the winning feature extractor
        (one of the plain extractor names or the ``best_*`` informative
        variants defined below).
    best_cla_name
        Sequence whose first element names the winning scikit-learn
        classifier (a key of ``table_all_f``).
    best_dis
        Number of top-scoring features to keep; only used by the ``best_*``
        extractors (converted with ``int``).

    Raises
    ------
    ValueError
        If the classifier or feature-extractor name is not recognized
        (previously these cases surfaced as IndexError / NameError).

    Side effect: writes ``<two dirs above cwd>/data/classifier.pkl``
    containing ``[best_words, classifier]`` for the informative extractors,
    or ``[classifier]`` for the plain ones.
    """
    # NOTE: these closures read `best_words`, which is assigned further down
    # in the informative-feature branch *before* they are ever called via
    # pos_features/neg_features.
    def best_word_features(words):
        # Keep only the words that made the informative-features cut.
        return dict([(word, True) for word in words if word in best_words])

    def best_bigram_features(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
        # Score the top-n bigrams, then filter them like plain words.
        bigram_finder = BigramCollocationFinder.from_words(words)
        bigrams = bigram_finder.nbest(score_fn, n)
        return best_word_features(bigrams)

    def best_word_bigram_features(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
        # Combine unigrams and top-n bigrams before filtering.
        bigram_finder = BigramCollocationFinder.from_words(words)
        bigrams = bigram_finder.nbest(score_fn, n)
        return best_word_features(words + bigrams)

    table_all_f = {"BernoulliNB": BernoulliNB(),
                   "MultinomialNB": MultinomialNB(),
                   "LogisticRegression": LogisticRegression(),
                   "SVC": SVC(),
                   "LinearSVC": LinearSVC(),
                   "NuSVC": NuSVC()}

    # Fail fast with a clear message instead of an IndexError on an empty
    # list later on.
    if best_cla_name[0] not in table_all_f:
        raise ValueError("unknown classifier name: %r" % (best_cla_name[0],))
    best_cla = table_all_f[best_cla_name[0]]

    all_feature = [bag_of_words, bigram, bigram_words]
    all_fea_name = ["bag_of_words", "bigram", "bigram_words"]
    all_f_fea_name = ["best_word_features", "best_bigram_features", "best_word_bigram_features"]
    all_word_scores = [create_word_scores, create_bigram_scores, create_word_bigram_scores]
    all_f_feature = [best_word_features, best_bigram_features, best_word_bigram_features]

    # Payload to pickle: optionally best_words, always the trained classifier.
    fen = []

    if best_fea[0] in all_f_fea_name:
        # Informative-feature extractor: compute word scores, pick the top
        # `best_dis` words, then featurize both corpora with the matching
        # closure (which reads `best_words`).
        t = all_f_fea_name.index(best_fea[0])
        word_scores = all_word_scores[t](pos_filename, neg_filename)
        best_words = find_best_words(word_scores, int(best_dis))
        fen.append(best_words)
        posFeatures = pos_features(all_f_feature[t], pos_filename)
        negFeatures = neg_features(all_f_feature[t], neg_filename)
    elif best_fea[0] in all_fea_name:
        # Plain extractor: no scoring step, featurize directly.
        t = all_fea_name.index(best_fea[0])
        posFeatures = pos_features(all_feature[t], pos_filename)
        negFeatures = neg_features(all_feature[t], neg_filename)
    else:
        # Previously this fell through and crashed with a NameError on
        # `posFeatures` below.
        raise ValueError("unknown feature extractor: %r" % (best_fea[0],))

    trainSet = posFeatures + negFeatures

    rs_classifier = SklearnClassifier(best_cla)
    rs_classifier.train(trainSet)
    fen.append(rs_classifier)

    # Portable path (the original hard-coded Windows '\\' separators) and a
    # context manager so the pickle file is always flushed and closed.
    data_dir = os.path.join(os.path.dirname(os.path.dirname(os.getcwd())),
                            'data', 'classifier.pkl')
    with open(data_dir, 'wb') as f:
        pickle.dump(fen, f)
    
    

