import pandas as pd
import jieba
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score
import xgboost as xgb
from pandas.compat import StringIO
import bowen

def pred_xl(data):
    """Predict forward/comment/like counts for a single Weibo post.

    The post is appended as the last row of the known test frame so all
    derived features are computed consistently with training, then three
    pre-trained regressors (loaded from disk) score that row.

    Parameters
    ----------
    data : dict
        One post with keys 'uid', 'mid', 'time', 'content', e.g.
        {'uid': '...', 'mid': '...', 'time': '2015-08-19 22:44:55',
         'content': 'Xah Emacs Tutorial http://t.cn/zWoY9IZ'}.

    Returns
    -------
    dict
        {'zfs': forward_pred, 'dzs': like_pred, 'pls': comment_pred}.
    """
    X_test = bowen.test1.drop(['forward_count', 'comment_count', 'like_count'], axis=1)
    # The new post becomes the last row of the working frame.
    data_work = X_test.append(data, ignore_index=True)

    # Time-derived features.
    data_work['time'] = pd.to_datetime(data_work['time'])
    data_work['month'] = data_work.time.dt.month.astype(float)
    data_work['hour'] = data_work.time.dt.hour.astype(float)
    data_work['weekday'] = data_work.time.dt.weekday.astype(float)

    def hour_cut(x):
        """Bucket an hour-of-day into a coarse period of the day."""
        if 0 <= x <= 7:      # early morning
            return 0
        elif 7 < x <= 12:    # morning
            return 1
        elif 12 < x <= 17:   # afternoon
            return 2
        elif 17 < x <= 19:   # early evening
            return 3
        elif 19 < x < 24:    # night
            return 4

    data_work['hour_cut'] = data_work['hour'].map(hour_cut).astype(float)

    # Per-user historical statistics, looked up from precomputed tables on
    # the bowen module; users absent from a table get 0.
    uid_features = [
        'uid_and_commentCount', 'uid_and_commentMean', 'uid_and_commentMax',
        'uid_and_likeCount', 'uid_and_likeMean', 'uid_and_likeMax',
        'uid_and_forwardCount', 'uid_and_forwardMean', 'uid_and_forwardMax',
        'uid_and_contentCount',
    ]
    for feat in uid_features:
        data_work[feat] = data_work.loc[:, 'uid'].map(getattr(bowen, feat)).fillna(0)

    # Keyword flags from the post content: segmented token -> feature column.
    keyword_cols = {
        'http': 'http',
        '红包': 'hongbao',
        '分享': 'fengxiang',
        '打车': 'dache',
        'cn': 'cn',
        '微博': 'weibo',
        '##': 'topic',
        '@': 'ai',
        '[': 'zhuangfa',
        '代金券': 'daijinjuan',
        '2015': 'nianfen',
    }
    for col in keyword_cols.values():
        data_work[col] = 0.0

    # BUGFIX: the original ran jieba.cut on the whole Series and iterated the
    # characters of the generator's repr string, so no keyword ever matched;
    # and `data_work.loc['http'] = 1.0` created a brand-new row instead of
    # setting the flag. Segment only the new post's content and set the flag
    # on the last row.
    last = data_work.index[-1]
    for token in jieba.cut(str(data_work.at[last, 'content']), cut_all=True):
        col = keyword_cols.get(token)
        if col is not None:
            data_work.loc[last, col] = 1.0

    # Drop raw identifier/text columns the models were not trained on.
    data_work = data_work.drop(['uid', 'mid', 'time', 'content', 'hour'], axis=1)

    row = data_work.tail(1)
    # Forward (zf) / like (dz) / comment (pl) regressors, trained offline.
    zfs = float(joblib.load('zf_model.m').predict(row)[0])
    dzs = float(joblib.load('dz_model.m').predict(row)[0])
    pls = float(joblib.load('pl_model.m').predict(row)[0])

    # BUGFIX: the original returned {'pls': dzs, 'dzs': pls}, swapping the
    # like and comment predictions.
    return {'zfs': zfs, 'dzs': dzs, 'pls': pls}