import pandas as pd

# Load the spreadsheet, using the title column ('片名') as the row index.
df = pd.read_excel('纪录片播放量.xls', index_col='片名')
print(df.head())
print(df.info())
print(df.describe())

# Drop the '标签' (tags) and '上传日期' (upload date) columns — unused below.
df.drop(columns=['标签', '上传日期'], inplace=True)

# print(df['播放数'].value_counts().index)

def f(x):
    """Convert a scraped count value to a float.

    The site abbreviates large counts with the suffix '万' (10 000),
    e.g. '3.5万' -> 35000.0; plain numeric strings become floats.
    Already-numeric values pass through unchanged, so the function is
    safe to apply twice, and surrounding whitespace is tolerated.
    """
    if isinstance(x, (int, float)):
        # Column was already converted (or parsed as numeric by pandas).
        return float(x)
    s = str(x).strip()
    if '万' in s:
        # Drop the trailing '万' and scale by 10 000.
        return float(s[:-1]) * 10000
    return float(s)
# Convert the play / danmaku / favourite count columns to numbers.
for count_col in ('播放数', '弹幕数', '收藏数'):
    df[count_col] = df[count_col].map(f)

print(df['播放数'].value_counts().index)

# Kernel-density plot of the play counts.
import matplotlib.pyplot as plt
df['播放数'].plot(kind='kde')
plt.show()

import jieba

# Tokenise each synopsis ('简介') with jieba's accurate mode (lcut),
# replacing the text with a list of words.
df['简介'] = df['简介'].map(jieba.lcut)
print(df['简介'].head())

# Discard single-character tokens — they carry little meaning.
df['简介'] = df['简介'].map(lambda words: [w for w in words if len(w) > 1])
print(df['简介'].head())

# Remove stopwords from the tokenised synopses.
# Load the stopword list (one word per line).
stop = pd.read_csv('stopwords1.txt', header=None, sep='\n\t',
                   engine='python', encoding='utf-8')
# Use a set: membership tests are O(1) instead of O(n) scans of a list,
# which matters because every token of every synopsis is checked below.
stop = set(stop[0])

# Drop any token that appears in the stopword set.
df['简介'] = df['简介'].map(lambda words: [w for w in words if w not in stop])
print(df['简介'].head())

# Re-join each token list into one space-separated string (the input
# format both TfidfVectorizer and WordCloud expect).
df['简介'] = df['简介'].map(' '.join)
print(df['简介'].head())

text = df.loc["雷蒙德·布兰克的厨房秘密【全8集】", "简介"]
from wordcloud import WordCloud
# Build a word cloud for this one synopsis and render it.
cloud = WordCloud(background_color='white', width=400, height=400, margin=2)
plt.imshow(cloud.generate(text))
# Hide the axes — they are meaningless for a word cloud.
plt.axis('off')


# Term-frequency statistics (TF-IDF) over the synopses.
from sklearn.feature_extraction.text import TfidfVectorizer
# Tokens are runs of CJK or Latin letters.  NOTE: the original pattern
# '[\u4e00-\u9fa5|a-zA-Z]+' contained a literal '|' inside the character
# class ('|' is not alternation there), so pipe characters were wrongly
# accepted as token characters; removed.
tfidf = TfidfVectorizer(token_pattern=r'[\u4e00-\u9fa5a-zA-Z]+')
x1 = tfidf.fit_transform(df['简介'])
print(x1.shape)

# Reduce the sparse TF-IDF matrix to 10 latent dimensions (LSA).
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components=10)
x1 = svd.fit_transform(x1)
print(x1.shape)

# The raw synopsis text is no longer needed as a feature.
del df['简介']

import numpy as np

# Target y: log-scale the play counts to tame their heavy tail.
y = np.log(df['播放数'])
print(df.columns)
# The target must not remain among the features.
del df['播放数']
# The remaining columns form the numeric feature matrix.
x = df.values

# Standardise the numeric features (zero mean, unit variance).
from sklearn.preprocessing import StandardScaler
x = StandardScaler().fit_transform(x)
# Append the 10 SVD components derived from the synopsis text.
x = np.c_[x, x1]
print(x.shape)

# Split 70 % train / 30 % test.
from sklearn.model_selection import train_test_split
# random_state pins the split so the grid-search scores printed below
# are reproducible between runs (the original split was random every run).
train_x, test_x, train_y, test_y = train_test_split(
    x, y, train_size=0.7, random_state=42)

# Model 1: ridge regression, tuning alpha with a 5-fold grid search.
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV

gri = GridSearchCV(
    estimator=Ridge(),
    param_grid={'alpha': [0.1, 0.2, 0.5, 1, 2, 5, 10, 100]},
    cv=5,
)
gri.fit(train_x, train_y)

print(gri.best_score_)
print(gri.best_params_)
# Model 2: random forest regressor, same 5-fold grid-search protocol.
from sklearn.ensemble import RandomForestRegressor
pg = {
    'n_estimators': [5, 10, 50, 100],
    'max_depth': [2, 3, 5],
}
gri = GridSearchCV(RandomForestRegressor(), param_grid=pg, cv=5)
gri.fit(train_x, train_y)

print(gri.best_score_)
print(gri.best_params_)

# Model 3: gradient-boosted trees (GBDT).
from sklearn.ensemble import GradientBoostingRegressor
pg = {
    'n_estimators': [5, 10, 50, 100],
    'max_depth': [2, 3, 5],
}
gri = GridSearchCV(GradientBoostingRegressor(), param_grid=pg, cv=5)
gri.fit(train_x, train_y)

# NOTE: this section prints params before score (reverse of the others).
print(gri.best_params_)
print(gri.best_score_)

# Final model: gradient-boosted trees, refit with whatever hyper-parameters
# the grid search above actually selected, instead of re-hard-coding
# max_depth=3 / n_estimators=100 (which silently diverges if the search
# picks different values).
gbr = GradientBoostingRegressor(**gri.best_params_)
gbr.fit(train_x, train_y)
# Held-out R^2 on the test split.
print(gbr.score(test_x, test_y))