import pandas as pd
import jieba
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score
import xgboost as xgb

'''导入文件数据'''

# Load the raw tab-separated competition files and tag each frame with its
# origin so the rows can be told apart again after concatenation.
_train_columns = ['uid', 'mid', 'time', 'forward_count', 'comment_count', 'like_count', 'content']
_test_columns = ['uid', 'mid', 'time', 'content']

data_train = pd.DataFrame(pd.read_table('weibo_train_data.txt', sep='\t'))
data_train.columns = _train_columns
data_train['type'] = 'train'

data_test = pd.DataFrame(pd.read_table('weibo_predict_data.txt', sep='\t'))
data_test.columns = _test_columns
data_test['type'] = 'test'

'''数据处理'''

# Rows with any missing field are unusable downstream — drop them up front.
data_train = data_train.dropna()
data_test = data_test.dropna()

# Stack train on top of test so every derived feature is computed once
# for both sets with identical logic.
data_all = pd.concat([data_train, data_test], axis=0, sort=False)

# Calendar features, cast to float to match the rest of the feature matrix.
_timestamps = pd.to_datetime(data_all['time'])
data_all['time'] = _timestamps
data_all['month'] = _timestamps.dt.month.astype(float)
data_all['hour'] = _timestamps.dt.hour.astype(float)
data_all['weekday'] = _timestamps.dt.weekday.astype(float)

#时间特征
def hour_cut(x):
    """Bucket an hour of day into one of five coarse periods.

    Mapping: [0, 7] -> 0 (凌晨/dawn), (7, 12] -> 1 (上午/morning),
    (12, 17] -> 2 (下午/afternoon), (17, 19] -> 3 (傍晚/evening),
    (19, 24) -> 4 (晚上/night).  Values outside [0, 24) fall through
    and yield None (they cannot occur for a real ``dt.hour`` value).
    """
    if x < 0:
        return None
    if x <= 7:
        return 0
    if x <= 12:
        return 1
    if x <= 17:
        return 2
    if x <= 19:
        return 3
    if x < 24:
        return 4
    return None

# Replace the exact hour with its coarse bucket; the raw 'hour' column is
# removed because only the bucket is used as a feature.
data_all['hour_cut'] = data_all.pop('hour').map(hour_cut).astype(float)

# Per-user historical statistics, computed from the training set only
# (the test set has no target columns).  Group once and reuse the object.
_by_uid = data_train.groupby('uid')

# 评论特征 — comment history per user
uid_and_commentCount = _by_uid['comment_count'].count()
uid_and_commentMean = _by_uid['comment_count'].mean()
uid_and_commentMax = _by_uid['comment_count'].max()

# 点赞特征 — like history per user
uid_and_likeCount = _by_uid['like_count'].count()
uid_and_likeMean = _by_uid['like_count'].mean()
uid_and_likeMax = _by_uid['like_count'].max()

# 转发特征 — forward (repost) history per user
uid_and_forwardCount = _by_uid['forward_count'].count()
uid_and_forwardMean = _by_uid['forward_count'].mean()
uid_and_forwardMax = _by_uid['forward_count'].max()

# 博文特征 — number of posts per user
uid_and_contentCount = _by_uid['content'].count()

# 数据合并 — attach every per-user statistic to each row (train and test).
# Users never seen in the training data map to NaN and are zero-filled.
_uid_feature_series = {
    'uid_and_commentCount': uid_and_commentCount,
    'uid_and_commentMean': uid_and_commentMean,
    'uid_and_commentMax': uid_and_commentMax,
    'uid_and_likeCount': uid_and_likeCount,
    'uid_and_likeMean': uid_and_likeMean,
    'uid_and_likeMax': uid_and_likeMax,
    'uid_and_forwardCount': uid_and_forwardCount,
    'uid_and_forwardMean': uid_and_forwardMean,
    'uid_and_forwardMax': uid_and_forwardMax,
    'uid_and_contentCount': uid_and_contentCount,
}
for _feature_name, _series in _uid_feature_series.items():
    data_all[_feature_name] = data_all['uid'].map(_series).fillna(0)

# Keyword-presence flags: each column is 1.0 when jieba's tokenisation of the
# post contains the corresponding token, else 0.0.
# Maps jieba token -> flag column name.
_KEYWORD_FLAGS = {
    'http': 'http',
    '红包': 'hongbao',
    '分享': 'fengxiang',
    '打车': 'dache',
    'cn': 'cn',
    '微博': 'weibo',
    '##': 'topic',
    '@': 'ai',
    '[': 'zhuangfa',
    '代金券': 'daijinjuan',
    '2015': 'nianfen',
}

for _flag_col in _KEYWORD_FLAGS.values():
    data_all[_flag_col] = 0.0

# BUG FIX: after pd.concat the integer index contains duplicates (one train
# row and one test row share each label), so label-based
# data_all.loc[index, 'content'] selected BOTH rows — which is the only
# reason .to_string() did not crash on a plain str, and it fed jieba a
# Series repr polluted with index labels and the other row's text; the
# flag assignment likewise wrote 1.0 into two rows at once.  Use purely
# positional (iat) access instead, which is unambiguous either way.
# NOTE(review): only the first 100 rows are scanned — presumably a
# development-time shortcut; confirm before training on the full set.
_content_pos = data_all.columns.get_loc('content')
_flag_pos = {tok: data_all.columns.get_loc(col) for tok, col in _KEYWORD_FLAGS.items()}
for _row in range(min(100, len(data_all))):
    _text = data_all.iat[_row, _content_pos]
    for _token in jieba.cut(str(_text)):
        _col_pos = _flag_pos.get(_token)
        if _col_pos is not None:
            data_all.iat[_row, _col_pos] = 1.0

'''分出训练集和测试集'''
# Drop identifier / raw-text columns; only numeric features remain.
data_all = data_all.drop(['uid', 'mid', 'time', 'content', 'type'], axis=1)
# BUG FIX: the original `data_all['month'] == 2 | 3 | 4` evaluated
# `2 | 3 | 4` first (bitwise-or of ints == 7, since `|` binds tighter than
# `==`), so it actually selected July rows.  The intent — months 2, 3 and 4
# for training, month 5 for validation — is expressed with isin().
train1 = data_all.loc[data_all['month'].isin([2, 3, 4]), :]
test1 = data_all.loc[data_all['month'] == 5, :]