from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import numpy as np
# sklearn.externals.joblib was deprecated in scikit-learn 0.21 and removed in
# 0.23; import the standalone joblib package, falling back for old installs.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib
from random import choice

# Feature 1: article length
# Feature 2: average sentence length
# Feature 3: average word length
# Feature 4: vocabulary richness rate of the article
# Feature 5: number of distinct idioms used
# Feature 6: number of function ("empty") words
# Feature 7: number of question marks
# Feature 8: number of exclamation marks
# Feature 9: number of nouns
# Feature 10: number of verbs

def load_tag_to_idx(filename):
    """Read one tag per line from *filename* and map each distinct tag to a
    contiguous integer index in order of first appearance.

    Blank lines are skipped; surrounding whitespace is stripped from each tag.
    Returns a dict mapping tag -> index (0 .. n_distinct-1).
    """
    tag_to_idx = {}
    # Context manager closes the file even on error (the old code leaked the
    # handle if iteration raised).
    with open(filename) as fo:
        for line in fo:
            # strip() removes whitespace from both ends; the extra
            # lstrip()/rstrip() calls in the original were redundant.
            tag = line.strip()
            if not tag:
                continue  # skip blank lines so "" never becomes a tag
            # setdefault keeps the first index for a duplicated tag; the old
            # `tag_to_idx[line] = len(tag_to_idx)` re-numbered duplicates,
            # leaving unused holes in the 0..n-1 index range.
            tag_to_idx.setdefault(tag, len(tag_to_idx))
    return tag_to_idx

# Assemble the training set as a scikit-learn style "bunch" dict:
# 'data' (feature rows) and 'target' (label indices) here; the name
# lists are filled in further below.
data_dict = {}

csv_data = pd.read_csv('alldoc/corpora_train_4.csv')  # read the training data

# The seven stylometric feature columns actually used for training
# (txt_length, noun_count and verb_count from the original ten-feature
# list are deliberately left out).
train_batch_data = csv_data[["ave_sentence_length", "ave_word_length", "richness_rate",
                 "idiom_count", "emptyword_count", "question_mark_count","exclamation_mark"]]

# list(...) copies the rows directly; the original `[item for item in ...]`
# was an identical but slower copy.
data_dict['data'] = list(train_batch_data.values)

target_data = csv_data[['label']]
tag_to_idx = load_tag_to_idx('alldoc/corpora_train.label_to_idx')
# Map each document's label to its integer index.  Labels in the CSV may
# contain stray spaces, so remove them before the lookup.
data_dict['target'] = [tag_to_idx[item[0].replace(' ', '')]
                       for item in target_data.values]

# Human-readable names for the feature columns, in the same order as the
# rows stored in data_dict['data'].
data_dict['feature_names'] = ["ave_sentence_length", "ave_word_length", "richness_rate",
                 "idiom_count", "emptyword_count", "question_mark_count","exclamation_mark"]

# Class (author) names, one per line of the label file.  Use a context
# manager so the handle is closed (the bare open(...).readlines() leaked
# it), and strip the trailing newline from each entry.  Iterating the file
# never yields None, so the original `if i != None` guard was dead code.
with open('alldoc/corpora_train.label_to_idx', 'r') as label_file:
    target_names_lst = [line.replace('\n', '') for line in label_file]

data_dict['target_names'] = target_names_lst

# Convert the assembled dict to numpy arrays for the DataFrame build below.
data = np.array(data_dict['data'])
feature_names = np.array(data_dict['feature_names'])
target = np.array(data_dict['target'])
target_names = np.array(data_dict['target_names'])

df = pd.DataFrame(data, columns=feature_names)
# Decode the integer label codes back to author names as a categorical column.
df['species'] = pd.Categorical.from_codes(target, target_names)
train = df

# Select every feature column (everything before the trailing 'species'
# label).  Deriving the count from feature_names replaces the hard-coded
# `df.columns[:7]`, which would silently break if the feature list changed.
features = df.columns[:len(feature_names)]
clf = RandomForestClassifier(n_jobs=-1)  # n_jobs=-1: use all CPU cores
y, _ = pd.factorize(train['species'])
clf.fit(train[features], y)

# Persist the trained model; compress=3 trades a little CPU time for a
# noticeably smaller pickle on disk.
joblib.dump(clf, 'alldoc/RFModel_4.pkl', compress=3)