#coding:utf8
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.svm.classes import LinearSVC, NuSVC, SVC
from nltk.metrics.association import BigramAssocMeasures
from nltk.collocations import BigramCollocationFinder
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.metrics.classification import accuracy_score
from analytical.feature_se import create_word_bigram_scores, find_best_words,\
    create_word_scores, create_bigram_scores
from analytical.cut_give import pos_features, neg_features
from sklearn.linear_model.logistic import LogisticRegression
import tkinter as tk
from  tkinter import ttk  

def dif_dimension(pos_filename, neg_filename, list1, list2, dimension):
    """Evaluate selected classifiers at several feature dimensions and
    display the accuracy scores in a tkinter table.

    Args:
        pos_filename: path to the positive-sample corpus file.
        neg_filename: path to the negative-sample corpus file.
        list1: classifier names to evaluate (subset of the table below);
            table rows keep the order given here.
        list2: one-element sequence naming the feature-extraction strategy
            ("best_word_features", "best_bigram_features" or
            "best_word_bigram_features"); unknown names fall back to the
            first strategy, matching the original behaviour.
        dimension: comma-separated feature dimensions, e.g. "500,1000".

    Side effects:
        Rebinds the module-level global ``b1`` to the per-dimension score
        lists (kept for backward compatibility with external readers) and
        blocks in ``win.mainloop()`` until the result window is closed.
    """
    # `best_words` is (re)bound inside the dimension loop below; all three
    # feature extractors close over it.
    def best_word_features(words):
        # Keep only the tokens that survived feature selection.
        return {word: True for word in words if word in best_words}

    def best_bigram_features(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
        # Score bigrams (chi-squared by default) and keep the n best.
        finder = BigramCollocationFinder.from_words(words)
        return best_word_features(finder.nbest(score_fn, n))

    def best_word_bigram_features(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
        # Combine unigrams with the n best bigrams before filtering.
        finder = BigramCollocationFinder.from_words(words)
        return best_word_features(words + finder.nbest(score_fn, n))

    def score(classifier):
        # Wrap the scikit-learn estimator for use inside NLTK, train it on
        # `train`, and return accuracy on the dev set.  `train`, `dev` and
        # `tag_dev` are closure variables set in the dimension loop below.
        wrapped = SklearnClassifier(classifier)
        wrapped.train(train)
        pred = wrapped.classify_many(dev)
        return accuracy_score(tag_dev, pred)

    dimensions = dimension.split(",")

    win = tk.Tk()
    win.title("分类器运行结果")
    tree = ttk.Treeview(win)  # result table

    all_classifiers = {
        "BernoulliNB": BernoulliNB(),
        "MultinomialNB": MultinomialNB(),
        "LogisticRegression": LogisticRegression(),
        "SVC": SVC(),
        "LinearSVC": LinearSVC(),
        "NuSVC": NuSVC(),
    }
    # Fix: was `table_ture_f` (typo) built with a redundant `.keys()` lookup.
    # Iterate `list1` (not the table) so the row order follows the caller.
    selected = {name: all_classifiers[name] for name in list1 if name in all_classifiers}
    table_column = list(selected)            # classifier names (row labels)
    table_column_f = list(selected.values()) # matching estimator instances

    feature_names = ["best_word_features", "best_bigram_features", "best_word_bigram_features"]
    score_builders = [create_word_scores, create_bigram_scores, create_word_bigram_scores]
    extractors = [best_word_features, best_bigram_features, best_word_bigram_features]

    # Fix: replaces a manual range(len(...)) search; unknown names keep the
    # original default of index 0.
    try:
        t = feature_names.index(list2[0])
    except ValueError:
        t = 0

    global b1  # kept: external code may read the last run's scores from here
    b1 = []
    for d in dimensions:
        word_scores = score_builders[t](pos_filename, neg_filename)
        best_words = find_best_words(word_scores, int(d))

        posFeatures = pos_features(extractors[t], pos_filename)
        negFeatures = neg_features(extractors[t], neg_filename)

        pos_len = len(posFeatures)
        train_len = int(pos_len * 0.175)
        devtest_len = int(pos_len * 0.125)  # fix: was misspelled `dectest_len`
        # NOTE(review): the first 12.5% of each class is never used and the
        # training split starts at 17.5% — confirm this split is intentional.
        train = posFeatures[train_len:] + negFeatures[train_len:]
        devtest = (posFeatures[devtest_len:train_len]
                   + negFeatures[devtest_len:train_len])
        dev, tag_dev = zip(*devtest)

        # One rounded accuracy per selected classifier, at this dimension.
        b1.append([round(score(clf), 2) for clf in table_column_f])

    # Transpose so each row is one classifier across all dimensions.
    rows = list(map(list, zip(*b1)))
    tree["columns"] = dimensions
    for col in dimensions:
        tree.column(col, width=50)
        tree.heading(col, text=col)
    for i, name in enumerate(table_column):
        tree.insert("", i, text=name, values=rows[i])

    tree.pack()
    win.mainloop()