#coding:utf8
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from analytical.feature_ex import *
from analytical.cut_give import pos_features, neg_features
from analytical.feature_se import create_word_scores, find_best_words,\
    create_bigram_scores, create_word_bigram_scores
import tkinter as tk
from  tkinter import ttk   

def dif_method(pos_filename, neg_filename, list1, list2):
    """Train several scikit-learn classifiers over several feature extractors
    and display the resulting accuracy matrix in a tkinter table.

    Args:
        pos_filename: path to the positive-sample corpus (forwarded to
            ``pos_features`` and the ``create_*_scores`` helpers).
        neg_filename: path to the negative-sample corpus.
        list1: names of the classifiers to evaluate (e.g. "SVC",
            "MultinomialNB"); unknown names are silently ignored.
        list2: names of the feature-extraction functions to use (e.g.
            "bag_of_words", "best_word_features"); unknown names are ignored.

    Side effects:
        Opens a tkinter window and blocks in ``mainloop``; also publishes the
        raw accuracy rows as module globals ``b1`` and ``c1``.
    """

    # ---- feature extractors -------------------------------------------------
    # These read `best_words` via closure late binding; `best_words` is only
    # assigned inside the "best_*" scoring loop further down, just before the
    # extractors are actually called.
    def best_word_features(words):
        """Keep only the items of `words` that appear in `best_words`."""
        return dict([(word, True) for word in words if word in best_words])

    def best_bigram_features(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
        """Top-n chi-squared bigrams of `words`, filtered through `best_words`."""
        bigram_finder = BigramCollocationFinder.from_words(words)
        bigrams = bigram_finder.nbest(score_fn, n)
        return best_word_features(bigrams)

    def best_word_bigram_features(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
        """Unigrams plus top-n chi-squared bigrams, filtered through `best_words`."""
        bigram_finder = BigramCollocationFinder.from_words(words)
        bigrams = bigram_finder.nbest(score_fn, n)
        return best_word_features(words + bigrams)

    def _split_train_dev(pos_feats, neg_feats):
        """Slice both labelled feature lists into a train set and a dev set.

        Returns (train, dev, tag_dev) where `dev` holds the feature dicts and
        `tag_dev` the gold labels of the development slice.

        NOTE(review): all split indices are derived from the positive count
        only — this assumes both corpora are the same length; confirm against
        the data files.
        """
        pos_len = len(pos_feats)
        train_start = int(pos_len * 0.175)
        dev_start = int(pos_len * 0.125)
        train = pos_feats[train_start:] + neg_feats[train_start:]
        devtest = pos_feats[dev_start:train_start] + neg_feats[dev_start:train_start]
        dev, tag_dev = zip(*devtest)
        return train, dev, tag_dev

    def score(classifier):
        """Train `classifier` on `train`, return its accuracy on the dev set.

        `train`, `dev` and `tag_dev` are rebound by the loops below before
        each call (closure late binding).
        """
        classifier = SklearnClassifier(classifier)  # nltk wrapper around the sklearn estimator
        classifier.train(train)
        pred = classifier.classify_many(dev)        # predicted labels for the dev set
        return accuracy_score(tag_dev, pred)        # compare predictions with gold labels

    # ---- classifier selection (order follows list1) -------------------------
    table_all_f = {"BernoulliNB": BernoulliNB(),
                   "MultinomialNB": MultinomialNB(),
                   "LogisticRegression": LogisticRegression(),
                   "SVC": SVC(),
                   "LinearSVC": LinearSVC(),
                   "NuSVC": NuSVC()}
    table_true_f = {key: table_all_f[key] for key in list1 if key in table_all_f}
    table_column = list(table_true_f)              # row labels (classifier names)
    table_column_f = list(table_true_f.values())   # the estimator instances

    win = tk.Tk()
    win.title("分类器运行结果")
    tree = ttk.Treeview(win)  # result table

    # ---- feature-extractor selection (order follows list2) ------------------
    table_all_t = {"bag_of_words": bag_of_words,
                   "bigram": bigram,
                   "bigram_words": bigram_words,
                   "best_word_features": best_word_features,
                   "best_bigram_features": best_bigram_features,
                   "best_word_bigram_features": best_word_bigram_features}
    table_true_t = {key: table_all_t[key] for key in list2 if key in table_all_t}

    # Plain extractors: all words / bigrams / words+bigrams as features.
    plain_candidates = [("bag_of_words", bag_of_words),
                        ("bigram", bigram),
                        ("bigram_words", bigram_words)]
    fea_name = [nm for nm, _ in plain_candidates if nm in table_true_t]
    feature = [fn for nm, fn in plain_candidates if nm in table_true_t]

    # Kept global: other modules may read the raw rows — TODO confirm and
    # drop the globals if nothing outside depends on them.
    global b1
    b1 = []  # one accuracy row per plain extractor
    for extractor in feature:
        posFeatures = pos_features(extractor, pos_filename)
        negFeatures = neg_features(extractor, neg_filename)
        train, dev, tag_dev = _split_train_dev(posFeatures, negFeatures)
        b1.append([round(score(clf), 2) for clf in table_column_f])

    # "Best" extractors: pick the 1500 most informative features, scored on
    # words / bigrams / words+bigrams respectively.  Pairing name, extractor
    # and scorer in one tuple keeps the three lists aligned by construction.
    best_candidates = [
        ("best_word_features", best_word_features, create_word_scores),
        ("best_bigram_features", best_bigram_features, create_bigram_scores),
        ("best_word_bigram_features", best_word_bigram_features, create_word_bigram_scores),
    ]
    selected = [(nm, fn, sc) for nm, fn, sc in best_candidates if nm in table_true_t]
    f_fea_name = [nm for nm, _, _ in selected]

    global c1
    c1 = []  # one accuracy row per "best" extractor
    for _, extractor, scorer in selected:
        word_scores = scorer(pos_filename, neg_filename)
        best_words = find_best_words(word_scores, 1500)  # rebinds the closure var read above
        posFeatures = pos_features(extractor, pos_filename)
        negFeatures = neg_features(extractor, neg_filename)
        train, dev, tag_dev = _split_train_dev(posFeatures, negFeatures)
        c1.append([round(score(clf), 2) for clf in table_column_f])

    # ---- render the accuracy matrix -----------------------------------------
    rs = b1 + c1                    # rows: one per extractor
    name = fea_name + f_fea_name    # column headers, aligned with rs
    rs = list(map(list, zip(*rs)))  # transpose: one row per classifier
    tree["columns"] = name
    for col in name:
        tree.column(col, width=170)   # configure column width
    for col in name:
        tree.heading(col, text=col)   # show the header text
    for idx, clf_name in enumerate(table_column):
        # Guard against the no-extractor case, where the transpose is empty.
        row = rs[idx] if idx < len(rs) else []
        tree.insert("", idx, text=clf_name, values=row)

    tree.pack()
    win.mainloop()

