from sklearn import svm
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer
import joblib
import pickle
import jieba
import numpy
import random

# Tokenizer handed to CountVectorizer: jieba full-mode segmentation.
# PEP 8 (E731): use `def` instead of assigning a lambda to a name.
def comma_tokenizer(x):
    """Segment Chinese text *x* with jieba in full mode (cut_all=True)."""
    return jieba.cut(x, cut_all=True)


# Format was adjusted upstream: one record per line.
def inputdata(filename):
    """Return all lines of *filename* (UTF-8) as a list of strings.

    Lines keep their trailing newline, matching ``file.readlines()``.
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename, 'r', encoding='utf-8') as f:
        return f.readlines()


# Split the dataset into train / test sets by ratio.
def splitdataset(dataset, splitratio):
    """Randomly split *dataset* into (train_set, test_set).

    *splitratio* is the fraction of items that go into the training set;
    the remainder becomes the test set. Uses ``random.randrange``, so seed
    the ``random`` module for reproducible splits.
    """
    train_size = int(len(dataset) * splitratio)
    # Work on a real copy: the original did `copy = dataset` and then
    # popped from it, silently mutating the caller's list.
    remaining = list(dataset)
    train_set = []
    while len(train_set) < train_size:
        index = random.randrange(len(remaining))
        train_set.append(remaining.pop(index))
    return train_set, remaining


def _parse_labeled_lines(lines):
    """Parse labeled lines into (texts, int labels).

    Each stripped line ends with a separator character followed by a
    single-digit class id: everything up to the last two characters is
    the question text, the final character is the label.
    """
    words = []
    tags = []
    for line in lines:
        line = line.strip()
        words.append(line[:-2])
        tags.append(int(line[-1]))
    return words, tags


# Separate questions from labels; returns words and tags for both sets.
def splitset(trainset, testset):
    """Split labeled train/test lines into question texts and int tags.

    Returns (train_words, train_tags, test_words, test_tags). The two
    identical parse loops of the original are factored into
    ``_parse_labeled_lines``.
    """
    train_words, train_tags = _parse_labeled_lines(trainset)
    test_words, test_tags = _parse_labeled_lines(testset)
    return train_words, train_tags, test_words, test_tags


# Vectorize the train/test questions so they can feed the SVM.
def covectorize(train_words, test_words):
    """Fit a bag-of-words CountVectorizer on the training questions.

    The vocabulary is learned from ``train_words`` only; ``test_words``
    is transformed with that same vocabulary. Returns
    (train_matrix, test_matrix, fitted_vectorizer) — the vectorizer is
    kept so single questions can be transformed at prediction time.
    """
    vectorizer = CountVectorizer(
        tokenizer=comma_tokenizer,
        binary=False,
        decode_error='replace',
        stop_words='english',
    )
    train_matrix = vectorizer.fit_transform(train_words)
    test_matrix = vectorizer.transform(test_words)
    return train_matrix, test_matrix, vectorizer


# Build and fit the SVM classifier (kernel options: rbf, linear, poly).
def train_clf(train_data, train_tags):
    """Train an RBF-kernel SVC on the vectorized questions.

    Hyperparameters are spelled out explicitly (C=10, gamma='auto') so
    behavior does not drift with sklearn default changes. Labels are
    converted to a numpy array before fitting.
    """
    classifier = svm.SVC(
        C=10.0,
        cache_size=200,
        class_weight=None,
        coef0=0.0,
        decision_function_shape='ovr',
        degree=3,
        gamma='auto',
        kernel='rbf',
        max_iter=-1,
        probability=False,
        random_state=None,
        shrinking=True,
        tol=0.001,
        verbose=False,
    )
    labels = numpy.asarray(train_tags)
    classifier.fit(train_data, labels)
    return classifier


# Report precision and recall on the test set.
def evaluate(actual, pred):
    """Print macro-averaged precision and recall for *pred* vs *actual*."""
    precision = metrics.precision_score(actual, pred, average='macro')
    recall = metrics.recall_score(actual, pred, average='macro')
    print(f'precision:{precision:.3f}')
    print(f'recall:{recall:0.3f}')


# 用于预测时，出现错误
def covectorize_one_sent(ques, clf, v):
    list = []
    list.append(ques)
    pre_data = v.transform(list)
    pred_id = clf.predict(pre_data)
    return pred_id


# Map the predicted numeric id to its question-type label.
def convertidtotype(typeid):
    """Return the relation-type label for *typeid*, or None if unknown."""
    type_labels = {
        1: 'phe-->cause',
        2: 'phe-->mnt',
        3: 'cause-->phe',
        4: 'mnt-->phe',
        5: 'mnt-cause',
    }
    # dict.get returns None for unmapped ids, matching the original
    # if-chain's implicit fall-through.
    return type_labels.get(typeid)


# Load the pre-trained SVM model and predict the question's type.
def predbysvm(question):
    """Classify *question* with the SVM model persisted on disk.

    Loads the pickled classifier and the vectorizer vocabulary from
    hard-coded paths, rebuilds the CountVectorizer with that vocabulary,
    and returns (type_id, real_type).
    """
    clf = joblib.load(r"F:\develop_code\py_code\demo\demo\model\svm.pkl")
    # Use 'with' so the vocabulary file is closed (the original passed an
    # open() straight to pickle.load and leaked the handle).
    # NOTE(review): pickle.load executes arbitrary code from the file —
    # acceptable only because these model files are self-produced.
    with open(r"F:\develop_code\py_code\demo\demo\model\v.pkl", "rb") as fv:
        vocabulary = pickle.load(fv)
    v = CountVectorizer(tokenizer=comma_tokenizer, binary=False,
                        decode_error='replace', stop_words='english',
                        vocabulary=vocabulary)
    type_of_question = covectorize_one_sent(question, clf, v)  # numpy.ndarray
    type_id = type_of_question.tolist()[0]
    print("SVM识别出的问题类型type_id:", type_id)
    real_type = convertidtotype(type_id)
    print("SVM识别出的问题类型real_type:", real_type)
    return type_id, real_type


if __name__ == '__main__':
    # Training pipeline — kept commented out; uncomment to (re)build the
    # model files that predbysvm() loads from disk.
    # linelist = inputdata('../label_data/handwork/question_type.txt')
    #
    # # Split into train / test sets by ratio
    # trainset, testset = splitdataset(linelist, 0.7)
    #
    # # Separate the questions and their type labels for both sets
    # train_words, train_tags, test_words, test_tags = splitset(trainset, testset)
    #
    # # Vectorize the train and test questions
    # train_data, test_data, v = covectorize(train_words, test_words)
    #
    # # Build and train the SVM classifier
    # clf = train_clf(train_data, train_tags)
    #
    # # Persist the trained SVM model
    # joblib.dump(clf, "model/svm.pkl")
    #
    # # Persist the vocabulary so prediction can rebuild the vectorizer
    # convectorpath = "model/v.pkl"
    # with open(convectorpath, 'wb') as fw:
    #     pickle.dump(v.vocabulary_, fw)

    # question = '滞塞这个原因会引发哪些故障'
    # type_of_question = covectorize_one_sent(question, clf, v)  # type(type_of_question) = numpy.ndarray
    # type_id = type_of_question.tolist()[0]
    # real_type = convertidtotype(type_id)
    # print(predbysvm('滞塞这个原因会引发哪些故障'))
    print(predbysvm('点蚀的原因是什么'))

    # Accuracy check on the test set (requires the training pipeline above)
    # re = clf.predict(test_data)
    # print("混淆矩阵:", metrics.accuracy_score(re, numpy.asanyarray(test_tags)))
    # print('分类报告:', metrics.classification_report(re, numpy.asanyarray(test_tags)))
    # evaluate(numpy.asarray(test_tags), re)

# Model accuracy on the train / test sets
# print("训练集准确率：",clf.score(train_x,train_y))
# print("\n")
# print("测试集准确率:",clf.score(test_x,test_y))
