import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix
from sklearn import feature_extraction
from sklearn import neighbors
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
# NOTE: sklearn.linear_model.stochastic_gradient was removed in
# scikit-learn 0.24; SGDClassifier is importable from sklearn.linear_model.
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score, classification_report, f1_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier

from load_data import *


def process_data():
    """Load data, build TF-IDF features, and split train/validation.

    Returns:
        X_train, X_val: dense float32 TF-IDF feature matrices.
        y_train, y_val: label lists matching X_train / X_val.
        X_test: sparse TF-IDF matrix for the test reviews.
        test_df: raw test DataFrame (consumed later by to_file()).
    """
    train_df, test_df = get_data()
    df = train_df

    labels = df['labels'].values.tolist()
    contents = df['review'].values.tolist()

    # CountVectorizer builds the term-frequency matrix: element a[i][j]
    # is the count of word j in document i.
    vectorizer = CountVectorizer()

    # TfidfTransformer converts raw counts into TF-IDF weights.
    transformer = TfidfTransformer()

    # Inner fit_transform builds the count matrix; outer one re-weights
    # it with TF-IDF.
    tfidf = transformer.fit_transform(vectorizer.fit_transform(contents))

    # Vocabulary size. get_feature_names() was removed in scikit-learn
    # 1.2; prefer get_feature_names_out() when available.
    if hasattr(vectorizer, 'get_feature_names_out'):
        word = vectorizer.get_feature_names_out()
    else:
        word = vectorizer.get_feature_names()
    print("单词数量:", len(word))

    # Densify as float32 (half the memory of the default float64).
    # The original wrapped tfidf in coo_matrix() first, which was a
    # redundant copy before toarray().
    X = tfidf.astype(np.float32).toarray()

    X_train, X_val, y_train, y_val = train_test_split(X,
                                                      labels,
                                                      test_size=0.3,
                                                      random_state=1)

    # IMPORTANT: only transform (never fit) the test set so it reuses the
    # training vocabulary and IDF weights.
    X_test = transformer.transform(vectorizer.transform(test_df['review']))
    return X_train, X_val, y_train, y_val, X_test, test_df

def logistic_Regression():
    """Train a logistic-regression model on TF-IDF features, report
    validation metrics, and write test predictions to a submission CSV."""
    print('-'*50)
    X_train, X_val, y_train, y_val, X_test, test_df = process_data()
    clf = LogisticRegression(solver='liblinear')
    clf.fit(X_train, y_train)
    # BUG FIX: this accuracy is computed on the validation split, but the
    # original message labelled it "训练集" (training set).
    print('验证集 模型的准确度:{}'.format(clf.score(X_val, y_val)))
    pre = clf.predict(X_val)
    print('验证集')
    score("LogisticRegression", y_val, pre)
    to_file(X_test, test_df, clf, "LogisticRegression")

def bayes():
    """Train a multinomial naive-Bayes model on TF-IDF features, report
    validation metrics, and write test predictions to a submission CSV."""
    print('-'*50)
    X_train, X_val, y_train, y_val, X_test, test_df = process_data()
    clf = MultinomialNB()
    clf.fit(X_train, y_train)
    # BUG FIX: this accuracy is computed on the validation split, but the
    # original message labelled it "训练集" (training set).
    print('验证集 模型的准确度:{}'.format(clf.score(X_val, y_val)))
    pre = clf.predict(X_val)
    print('验证集')
    score("MultinomialNB", y_val, pre)
    to_file(X_test, test_df, clf, "MultinomialNB")

def rf():
    """Train a 20-tree random forest on TF-IDF features, report
    validation metrics, and write test predictions to a submission CSV."""
    print('-'*50)
    X_train, X_val, y_train, y_val, X_test, test_df = process_data()
    clf = RandomForestClassifier(n_estimators=20)
    clf.fit(X_train, y_train)
    # BUG FIX: this accuracy is computed on the validation split, but the
    # original message labelled it "训练集" (training set).
    print('验证集 模型的准确度:{}'.format(clf.score(X_val, y_val)))

    pre = clf.predict(X_val)
    print('验证集')
    score("RandomForest", y_val, pre)
    to_file(X_test, test_df, clf, "RandomForest")

def Svm():
    """Train a linear support-vector classifier on TF-IDF features,
    report validation metrics, and write test predictions to a CSV."""
    print('-'*50)
    X_train, X_val, y_train, y_val, X_test, test_df = process_data()
    clf = svm.LinearSVC()  # linear-kernel SVM, efficient for sparse text
    clf.fit(X_train, y_train)
    # BUG FIX: this accuracy is computed on the validation split, but the
    # original message labelled it "训练集" (training set).
    print('验证集 模型的准确度:{}'.format(clf.score(X_val, y_val)))

    pre = clf.predict(X_val)
    print('验证集')
    score("SVM", y_val, pre)
    to_file(X_test, test_df, clf, "SVM")

def knn():
    """Train a 7-neighbor k-NN classifier on TF-IDF features, report
    validation metrics, and write test predictions to a submission CSV."""
    print('-'*50)
    X_train, X_val, y_train, y_val, X_test, test_df = process_data()
    clf = neighbors.KNeighborsClassifier(n_neighbors=7)
    clf.fit(X_train, y_train)
    # BUG FIX: this accuracy is computed on the validation split, but the
    # original message labelled it "训练集" (training set).
    print('验证集 模型的准确度:{}'.format(clf.score(X_val, y_val)))

    pre = clf.predict(X_val)
    print('验证集')
    score("KNeighbors", y_val, pre)
    to_file(X_test, test_df, clf, "KNeighbors")

def decision_Tree():
    """Train a decision tree on TF-IDF features, report validation
    metrics, and write test predictions to a submission CSV."""
    print('-'*50)
    X_train, X_val, y_train, y_val, X_test, test_df = process_data()
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    # BUG FIX: this accuracy is computed on the validation split, but the
    # original message labelled it "训练集" (training set).
    print('验证集 模型的准确度:{}'.format(clf.score(X_val, y_val)))

    pre = clf.predict(X_val)
    print('验证集')
    score("DecisionTreeClassifier", y_val, pre)
    to_file(X_test, test_df, clf, "DecisionTreeClassifier")

def sgd():
    """Train an SGD linear classifier on TF-IDF features, report
    validation metrics, and write test predictions to a submission CSV."""
    print('-'*50)
    X_train, X_val, y_train, y_val, X_test, test_df = process_data()
    clf = SGDClassifier()
    clf.fit(X_train, y_train)
    # BUG FIX: this accuracy is computed on the validation split, but the
    # original message labelled it "训练集" (training set).
    print('验证集 模型的准确度:{}'.format(clf.score(X_val, y_val)))

    pre = clf.predict(X_val)
    print('验证集')
    score("SGDClassifier", y_val, pre)
    to_file(X_test, test_df, clf, "SGDClassifier")

def score(name, y_test, pred):
    """Print accuracy and F1 for predictions *pred* against truth *y_test*.

    Args:
        name: algorithm label shown in the report header.
        y_test: ground-truth labels.
        pred: predicted labels of the same length.
    """
    print("算法评价:", name)

    print('accuracy score ')
    print(accuracy_score(y_test, pred))

    print('f1 score ')
    print(f1_score(y_test, pred))

def to_file(X_test, test_df, clf, method):
    """Predict the test set with *clf* and write a submission CSV.

    Args:
        X_test: TF-IDF feature matrix for the test reviews.
        test_df: test DataFrame; a textual 'sentiment' column is added
            (kept for backward compatibility with the original side effect).
        clf: fitted classifier exposing .predict().
        method: name used in the output path 'data/<method>_submission.csv'.
    """
    pred = clf.predict(X_test)
    # Map numeric labels to the textual values the submission expects.
    sentiment = ['positive' if x == 1 else 'negative' for x in pred]
    test_df['sentiment'] = sentiment

    submission_df = pd.read_csv('data/submission.csv')
    # BUG FIX: assigning test_df['sentiment'] (a Series) aligns by pandas
    # index, which silently misassigns or produces NaN if test_df does not
    # carry a default RangeIndex. Assign the plain list positionally instead.
    # NOTE(review): still assumes submission.csv rows are ordered like
    # test_df — confirm against the data pipeline.
    submission_df['sentiment'] = sentiment
    submission_df.to_csv('data/{}_submission.csv'.format(method), index=False)

def mix():
    df_1 = pd.read_csv('data/LogisticRegression_submission.csv')
    df_2 = pd.read_csv('data/SGDClassifier_submission.csv')
    df_3 = pd.read_csv('data/SVM_submission.csv')
    for i in range(len(df_1)):
        d = {'positive':0, 'negative':0}
        d[df_1.loc[i, 'sentiment']] += 1
        d[df_2.loc[i, 'sentiment']] += 1
        d[df_3.loc[i, 'sentiment']] += 1
        if d['positive'] > d['negative']:
            df_1.loc[i, 'sentiment'] = 'positive'
        else:
            df_1.loc[i, 'sentiment'] = 'negative'
    df_1.to_csv('data/mixed_submission.csv', index=False)
    
if __name__ == '__main__':
    # Each wrapper below trains one model and writes its own
    # data/<name>_submission.csv; uncomment to regenerate them before
    # running the ensemble.
    # logistic_Regression()
    # bayes()
    # rf()
    # Svm()
    # decision_Tree()
    # sgd()
    mix()