import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import warnings

# Silence all warnings globally (e.g. pandas chained-assignment warnings).
# NOTE(review): this also hides genuine problems - consider narrowing the filter.
warnings.filterwarnings("ignore")


def decision_tree(data_train, depth, leaf_num, split_num):
    """Fit three regression trees (reposts, comments, likes) on the
    non-noise rows of *data_train* and write rounded integer predictions
    back into 'repost_hat' / 'comments_hat' / 'likes_hat' columns.

    Parameters
    ----------
    data_train : pandas.DataFrame
        Must contain the targets 'repost', 'comments', 'likes', a noise
        flag column 'logit' (0 = usable row) and the feature columns.
        Mutated in place and also returned.
    depth : int
        max_depth for all three trees.
    leaf_num : int
        min_samples_leaf for all three trees.
    split_num : int
        min_samples_split for all three trees.

    Returns
    -------
    pandas.DataFrame
        The same *data_train* object; rows flagged as noise keep the
        default prediction of 0.
    """
    # Default prediction is 0; noise rows (logit != 0) keep it.
    data_train['repost_hat'] = 0
    data_train['comments_hat'] = 0
    data_train['likes_hat'] = 0

    # Only non-noise rows are used, both for fitting and for prediction.
    # (If the noise flag column is 'is_noise' instead of 'logit', rename here.)
    mask = data_train['logit'] == 0
    train = data_train[mask]

    # One tree per target, all sharing the same hyper-parameters.
    # NOTE(review): criterion='mse' was renamed 'squared_error' in
    # scikit-learn 1.0 and removed in 1.2; 'mse' is kept for the older
    # sklearn this script was written against - update if you upgrade.
    tree_params = dict(criterion='mse', max_depth=depth,
                       min_samples_leaf=leaf_num, min_samples_split=split_num)
    tree_repost = DecisionTreeRegressor(**tree_params)
    tree_comments = DecisionTreeRegressor(**tree_params)
    tree_likes = DecisionTreeRegressor(**tree_params)

    # Feature matrix: drop the three targets, the noise flag AND the
    # prediction columns created above.  (Bug fix: the original kept the
    # *_hat columns at fit time but dropped them at predict time, so fit
    # and predict saw different feature counts.)
    non_features = ['repost', 'comments', 'likes', 'logit',
                    'repost_hat', 'comments_hat', 'likes_hat']
    features = train.drop(non_features, axis=1, inplace=False)
    tree_repost.fit(features, train.loc[:, ['repost']])
    tree_comments.fit(features, train.loc[:, ['comments']])
    tree_likes.fit(features, train.loc[:, ['likes']])

    # Predict on the same non-noise rows; np.round returns floats, so the
    # columns are cast to int afterwards.  .loc avoids the unreliable
    # chained assignment df[col][mask] = ... used previously.
    data_train.loc[mask, 'repost_hat'] = np.round(tree_repost.predict(features), 0)
    data_train.loc[mask, 'comments_hat'] = np.round(tree_comments.predict(features), 0)
    data_train.loc[mask, 'likes_hat'] = np.round(tree_likes.predict(features), 0)
    data_train['repost_hat'] = data_train['repost_hat'].astype(int)
    data_train['comments_hat'] = data_train['comments_hat'].astype(int)
    data_train['likes_hat'] = data_train['likes_hat'].astype(int)
    return data_train


def precision(data):
    """Compute the weighted-deviation precision score for predictions.

    Adds the diagnostic columns 'deviation_repost', 'deviation_likes',
    'deviation_comments', 'lcf_sum', 'precision_1_-0.8' and 'sgn' to
    *data* in place, then returns the scalar precision: the fraction of
    "accurate" posts, each post weighted by its interaction count
    (capped at 100) plus one.
    """
    # Smoothed relative deviation per target (+5 / +3 in the denominator
    # keeps zero-interaction posts from blowing up the ratio).
    data['deviation_repost'] = (data['repost_hat'] - data['repost']).abs() / (data['repost'] + 5)
    # print (data['deviation_repost'])
    data['deviation_likes'] = (data['likes_hat'] - data['likes']).abs() / (data['likes'] + 3)
    # print (data['deviation_likes'])
    data['deviation_comments'] = (data['comments_hat'] - data['comments']).abs() / (data['comments'] + 3)
    # print (data['deviation_comments'])

    # Total interactions per post, capped at 100 so viral posts do not
    # dominate the weighting.
    data['lcf_sum'] = data['repost'] + data['likes'] + data['comments']
    data['lcf_sum'] = data['lcf_sum'].apply(lambda total: 100 if total > 100 else total)

    # A post is "accurate" when the weighted deviation stays below 0.2,
    # i.e. 1 - 0.5*dev_repost - 0.25*dev_likes - 0.25*dev_comments > 0.8.
    data['precision_1_-0.8'] = (1
                                - 0.5 * data['deviation_repost']
                                - 0.25 * data['deviation_likes']
                                - 0.25 * data['deviation_comments']
                                - 0.8)
    data.loc[data['precision_1_-0.8'] <= 0, 'sgn'] = 0
    data.loc[data['precision_1_-0.8'] > 0, 'sgn'] = 1

    weights = data['lcf_sum'] + 1
    return sum(weights * data['sgn']) / sum(weights)


# ---------------------------------------------------------------------------
# Script body: load the data, grid-search tree hyper-parameters, then run a
# final fit.  NOTE(review): the search scores on the same rows it trains on
# (no held-out split), so the reported precision is an in-sample score.
# ---------------------------------------------------------------------------
data_train = pd.read_csv('data_train.txt', index_col=[0], header=0)
data_train.head()

# Quick column subset used while smoke-testing the code; the grid search
# below re-selects its own columns, so this value is only a convenience.
# If decision_tree() filters on 'is_noise', swap 'logit' for 'is_noise'.
features_list = list(data_train.columns)[3:7] + list(data_train.columns)[8:]
train_subset = data_train.loc[:, features_list]

# Columns fed to the grid search (targets first, then features, then the
# noise flags).  Hoisted out of the loop - the list never changes.
SEARCH_COLUMNS = ['repost', 'comments', 'likes', 'tfidf', 'number_in_train',
                  'forward_max', 'comment_max', 'like_max', 'forward_mean',
                  'comment_mean', 'like_mean', 'time_weekend', 'panduan',
                  'length_all', 'length_chinese', 'sharing', 'book',
                  'mention', 'emoji', 'video', 'http', 'title', 'hotwords',
                  'keywords', 'is_noise', 'stock', 'logit']

best_precision = 0
best_para = [0, 0, 0]  # [max_depth, min_samples_leaf, min_samples_split]
kk = 200  # min_samples_split, held fixed during the search
for ii in range(20, 30, 5):  # max_depth candidates
    for jj in range(10, 210, 50):  # min_samples_leaf candidates
        print('%d th depth, %d leaf num is beginning' % (ii, jj))
        # for kk in range(50,550,100): # min_samples_split sweep (disabled)
        # Re-select a fresh copy every iteration: decision_tree() and
        # precision() add prediction/diagnostic columns in place, which
        # would otherwise leak into the next fit's feature matrix.
        train_subset = data_train.loc[:, SEARCH_COLUMNS]
        valid_subset2 = decision_tree(train_subset, ii, jj, kk)
        score = precision(valid_subset2)
        if score > best_precision:
            best_precision = score
            best_para = [ii, jj, kk]

        print(best_precision)
        print(best_para, '\n')

# Final fit with hand-picked hyper-parameters.  This feature set drops
# 'is_noise' (unlike the search above).  If decision_tree() filters on
# 'is_noise', swap 'logit' for 'is_noise' here.
FINAL_COLUMNS = ['repost', 'comments', 'likes', 'tfidf', 'number_in_train',
                 'forward_max', 'comment_max', 'like_max', 'forward_mean',
                 'comment_mean', 'like_mean', 'time_weekend', 'panduan',
                 'length_all', 'length_chinese', 'sharing', 'book', 'mention',
                 'emoji', 'video', 'http', 'title', 'hotwords', 'keywords',
                 'logit', 'stock']
train_subset = data_train.loc[:, FINAL_COLUMNS]

valid_subset = decision_tree(train_subset, 20, 60, 300)
precision(valid_subset)
