# Predict the author of a text. Steps:
# 1. Preprocess the text data: extract features
# 2. Load the trained model
# 3. Predict the author of the text

from sklearn.externals import joblib
import pickle
from RandomForest.utils import *
import pandas as pd
import numpy as np
import random


def random_list(start, stop, length):
    """Return `length` random integers drawn uniformly from [start, stop].

    `start` and `stop` are coerced to int and swapped if given in
    descending order, so both call orders work. `length` is truncated to
    int; a non-positive length yields an empty list.

    Args:
        start: One inclusive bound of the sampling range.
        stop: The other inclusive bound of the sampling range.
        length: Number of samples to draw.

    Returns:
        list[int]: Independent uniform samples (not reproducible unless
        the caller seeds the `random` module).
    """
    # Coerce unconditionally: the original skipped int() for negative
    # lengths, which made range() raise TypeError on e.g. -2.5.
    length = int(length)
    lo, hi = (int(start), int(stop)) if start <= stop else (int(stop), int(start))
    # range() of a negative length is simply empty, matching old behavior.
    return [random.randint(lo, hi) for _ in range(length)]


# --- Load the trained model and the test-set features -----------------------
# NOTE(review): `sklearn.externals.joblib` was removed in scikit-learn 0.23;
# on a modern install this file needs `import joblib` at the top instead.
model = joblib.load("alldoc/RFModel_4.pkl")

# Feature columns the model was trained on; order must match training.
# (An earlier 10-feature variant additionally used txt_length, noun_count
# and verb_count.)
feature_columns = ["ave_sentence_length", "ave_word_length", "richness_rate",
                   "idiom_count", "emptyword_count", "question_mark_count",
                   "exclamation_mark"]

csv_data = pd.read_csv('alldoc/corpora_test_4.csv', encoding='utf-8')
train_batch_data = csv_data[feature_columns]

# Ground-truth author labels, flattened to a plain Python list so they can
# be compared element-wise against the predictions later on.
realname_lst = csv_data['label'].tolist()

# Bundle features + metadata in one dict (mirrors sklearn's dataset layout).
data_dict = {}
data_dict['data'] = [row for row in train_batch_data.values]
data_dict['feature_names'] = feature_columns

# Map predicted class indices back to author names: line i of the
# label_to_idx file is the author name for class index i.
# `with` closes the file handle (the original leaked it); stripping the
# trailing newline reproduces the old replace('\n', '') behavior.
with open('alldoc/corpora_train.label_to_idx', 'r') as label_file:
    target_names_lst = [line.rstrip('\n') for line in label_file]
data_dict['target_names'] = target_names_lst

# Rebuild a DataFrame from the bundled dict and run the model over it.
data = np.array(data_dict['data'])
feature_names = np.array(data_dict['feature_names'])
target_names = np.array(data_dict['target_names'])

df = pd.DataFrame(data, columns=feature_names)
test = df
# Use every feature column (the old hard-coded [:7] slice silently
# duplicated the length of the feature list).
features = df.columns

# model.predict returns integer class indices; fancy-index target_names
# to translate them into author names.
preds = target_names[model.predict(test[features])]
# Accuracy: fraction of documents whose predicted author matches the label.
count = sum(1 for pred, actual in zip(preds, realname_lst) if pred == actual)
# Guard against an empty test set instead of raising ZeroDivisionError.
accuracy_rate = count / len(realname_lst) if realname_lst else 0.0
print(accuracy_rate)