from nltk.tokenize import word_tokenize
import pandas as pd
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC     
# from data import read_dataset
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

from sklearn.ensemble import RandomForestClassifier
import numpy as np

from tqdm import tqdm
from progress.bar import IncrementalBar
import xgboost as xgb

dataset_root = r'./PanJiu/data/'

# import nltk
# nltk.set_proxy('http://127.0.0.1:10808')
# nltk.download('punkt_tab')

def read_dataset(dataset_path):
    """Load a labelled, tab-separated dataset into a list of record dicts.

    Each row becomes ``{'sn': ..., 'msg': ..., 'label': ...}``; malformed
    rows (wrong field count) are skipped rather than raising.
    """
    frame = pd.read_csv(
        dataset_path,
        sep='\t',
        encoding='utf-8',
        header=None,
        names=['sn', 'msg', 'label'],
        on_bad_lines='skip',
    )
    return frame.to_dict(orient='records')

### Embed sentences with Doc2Vec.
class EmbeddingModel(object):
    """Trains a Doc2Vec model on a corpus file and turns message files into
    fixed-size feature matrices for the downstream classifiers."""

    def __init__(self, dataset_path):
        """Load the corpus and immediately train the Doc2Vec model.

        dataset_path: headerless, tab-separated file whose single column is
        the raw message text.
        """
        dataset = pd.read_csv(dataset_path, sep='\t', encoding='utf-8', header=None, names=['msg'], on_bad_lines='skip')
        self.dataset = dataset.to_dict(orient='records')
        self.build_embedding_model()

    def tokenizer(self):
        """Return the corpus as a list of lower-cased token lists."""
        return [word_tokenize(s['msg'].lower()) for s in self.dataset]

    def build_embedding_model(self):
        """Train the Doc2Vec model over the tokenized corpus."""
        tokenized_sent = self.tokenizer()
        bar = IncrementalBar("tokenized_sent:", max=len(tokenized_sent))
        tagged_data = []
        for i, tokens in enumerate(tokenized_sent):
            bar.next()
            # Each sentence becomes a TaggedDocument keyed by its index.
            tagged_data.append(TaggedDocument(tokens, [i]))
        bar.finish()
        self.model = Doc2Vec(tagged_data, vector_size=10, window=3, min_count=1, epochs=10)

    def build_train_label_feature(self, train_data, test_type=None):
        """Embed every labelled row of the file at *train_data*.

        Returns (features, labels) as numpy arrays, or their 70/30
        train/test split when test_type == "test". Rows whose 'msg' is not
        a string (e.g. NaN from pandas) are skipped.
        """
        train_data_ = read_dataset(train_data)
        # BUG FIX: max was len(train_data) -- the length of the *path*
        # string, not the number of records to embed.
        bar = IncrementalBar("train embedding:", max=len(train_data_))
        train_feature = []
        label_list = []
        for text in train_data_:
            bar.next()
            if isinstance(text['msg'], str):
                tokens = word_tokenize(text['msg'].lower())
                train_feature.append(self.model.infer_vector(tokens))
                label_list.append(int(text['label']))
        # BUG FIX: finish() was previously called inside the loop on every
        # iteration; it belongs after the loop completes.
        bar.finish()

        if test_type == "test":
            return self.split_train_test(np.array(train_feature), np.array(label_list))
        else:
            return np.array(train_feature), np.array(label_list)

    def build_test_label_feature(self, test_data):
        """Embed every row of the unlabelled file at *test_data*; returns a
        numpy feature matrix."""
        test_data_ = read_dataset(test_data)
        # BUG FIX: the bar was labelled "train embedding:" and its max used
        # the path length instead of the record count.
        bar = IncrementalBar("test embedding:", max=len(test_data_))
        test_feature = []
        for text in test_data_:
            bar.next()
            # Guard against non-string msg (NaN), consistent with
            # build_train_label_feature (the original crashed on NaN here).
            # NOTE(review): skipping shifts row alignment with the submit
            # file -- confirm the test set has no missing messages.
            if isinstance(text['msg'], str):
                tokens = word_tokenize(text['msg'].lower())
                test_feature.append(self.model.infer_vector(tokens))
        # BUG FIX: finish() moved out of the loop body.
        bar.finish()
        return np.array(test_feature)

    def split_train_test(self, train_feature, label_list):
        """Random 70/30 split of the feature matrix and label vector."""
        X_train, X_test, y_train, y_test = train_test_split(train_feature, label_list, test_size=0.3)
        return X_train, X_test, y_train, y_test

def macro_f1_score(f1_score):
    """Competition metric: weighted average of four per-class F1 scores.

    The class weights are 1:1:2:3 (normalized by 7 so they sum to 1),
    making errors on the later classes more costly.
    """
    weights = [1 / 7, 1 / 7, 2 / 7, 3 / 7]
    return sum(w * s for w, s in zip(weights, f1_score))

class RFModel(object):
    """Random-forest classifier wrapper with out-of-bag scoring enabled."""

    def __init__(self):
        self.rf = RandomForestClassifier(oob_score=True)

    ## Classical ML has no notion of batch_size -- fit on the whole matrix.
    def train(self, train_feature, train_label):
        """Fit the forest on the feature matrix / label vector."""
        self.rf.fit(train_feature, train_label)

    def predict(self, test_feature):
        """Return predicted labels for a feature matrix."""
        return self.rf.predict(test_feature)

    def test(self, test_feature, test_label):
        """Print the weighted macro F1 on a labelled hold-out set."""
        predictions = self.predict(test_feature)
        per_class_f1 = f1_score(test_label, predictions, average=None)
        print("RFModel f1_score:{}".format(macro_f1_score(per_class_f1)))


class SVMModel(object):
    """RBF-kernel SVM wrapper with class-balanced weighting."""

    def __init__(self):
        # High C (700) means weak regularisation; class_weight="balanced"
        # reweights by inverse class frequency to counter label imbalance.
        self.svm = SVC(
            C=700.0,
            cache_size=200,
            class_weight="balanced",
            coef0=0.0,
            decision_function_shape="ovr",
            degree=3,
            gamma='auto',
            kernel='rbf',
            max_iter=-1,
            probability=False,
            random_state=None,
            shrinking=True,
            tol=0.001,
            verbose=False,
        )

    ## Classical ML has no notion of batch_size -- fit on the whole matrix.
    def train(self, train_feature, train_label):
        """Fit the SVM on the feature matrix / label vector."""
        self.svm.fit(train_feature, train_label)

    def predict(self, test_feature):
        """Return predicted labels for a feature matrix."""
        return self.svm.predict(test_feature)

    def test(self, test_feature, test_label):
        """Print the weighted macro F1 on a labelled hold-out set."""
        predictions = self.predict(test_feature)
        per_class_f1 = f1_score(test_label, predictions, average=None)
        print("SVMModel f1_score:{}".format(macro_f1_score(per_class_f1)))

class XgboostModel(object):
    """Multi-class (4-label) gradient-boosted-tree classifier built on the
    native xgboost Booster API."""

    def __init__(self):
        # Booster hyper-parameters passed to xgb.train().
        self.params = {
            'booster': 'gbtree',
            'objective': 'multi:softmax',  # predict() returns class ids
            'num_class': 4,
            'gamma': 0.1,
            'max_depth': 6,
            'lambda': 2,                   # L2 regularisation weight
            'subsample': 0.7,
            'colsample_bytree': 0.7,
            'min_child_weight': 3,
            # BUG FIX: was misspelled 'slient', so the option was ignored as
            # an unknown parameter. NOTE(review): 'silent' is removed in
            # xgboost >= 1.0 -- switch to 'verbosity' on newer versions.
            'silent': 1,
            'eta': 0.1,                    # learning rate
            'seed': 1000,
            'nthread': 4,
            }

        # Number of boosting rounds.
        self.num_round = 100

    ## Classical ML has no notion of batch_size -- fit on the whole matrix.
    def train(self, train_feature, train_label):
        """Fit the booster on the feature matrix / label vector."""
        # Convert to xgboost's native DMatrix format.
        train_data = xgb.DMatrix(train_feature, label=train_label)
        self.xgb = xgb.train(self.params, train_data, self.num_round)

    def predict(self, test_feature):
        """Return predicted class ids for a feature matrix."""
        test_data = xgb.DMatrix(test_feature)
        return self.xgb.predict(test_data)

    def test(self, test_feature, test_label):
        """Print the weighted macro F1 on a labelled hold-out set."""
        test_data = xgb.DMatrix(test_feature)
        y_pred = self.xgb.predict(test_data)
        f1_score_result = f1_score(test_label, y_pred, average=None)
        print("XgboostModel f1_score:{}".format(macro_f1_score(f1_score_result)))

def submit(y_pred):
    """Write predictions into the sample-submission file and save the result.

    y_pred: sequence of predicted labels, assumed aligned row-for-row with
    preliminary_submit_dataset_a.csv -- TODO confirm against the embedding
    pipeline, which may skip rows with missing messages.
    """
    # Renamed local (was 'submit') so it no longer shadows this function.
    submission = pd.read_csv(dataset_root + 'preliminary_submit_dataset_a.csv')
    submission["label"] = y_pred
    # index=False (was the non-idiomatic index=0): drop the row-index column.
    submission.to_csv(dataset_root + "preliminary_pred_df.csv", index=False)


if __name__ == "__main__":
    # Paths to the corpus (unlabelled, for Doc2Vec training) and to the
    # labelled train/test splits under the notebook directory.
    corpus_data = dataset_root + "notebook/corpus_test.csv"
    train_data = dataset_root + "notebook/train_test.csv"
    test_data = dataset_root + "notebook/test_test.csv"
    # Train Doc2Vec on the corpus, then embed the labelled data; passing
    # "test" makes build_train_label_feature return a 70/30 train/test split.
    embedding_model = EmbeddingModel(corpus_data)
    X_train,X_test,y_train,y_test = embedding_model.build_train_label_feature(train_data,"test")
    
    # rf_model = RFModel()
    # rf_model.train(X_train,y_train)
    # rf_model.test(X_test,y_test)
    
    # X_train,y_train = embedding_model.build_train_label_feature(train_data)
    # X_test = embedding_model.build_test_label_feature(test_data)
    
    # svm_model = SVMModel()
    # svm_model.train(X_train,y_train)
    # svm_model.test(X_test,y_test)
    
    # Active experiment: XGBoost on the Doc2Vec features.
    xgb_model = XgboostModel()
    xgb_model.train(X_train,y_train)
    xgb_model.test(X_test,y_test)