import numpy as np
import pandas as pd
from python_ai.common.xcommon import sep
import jieba

# Wide console output: show every column without wrapping rows.
pd.set_option('display.max_columns', None, 'display.expand_frame_repr', False)

sep('load')
# Documentary play-count dataset, indexed by title.
df = pd.read_excel(r'../../../../../large_data/ML2/纪录片播放量.xls', index_col='片名')
print(df.shape)

sep('Remove 上传日期 标签')
# Upload date and tag columns are not used as features.
df = df.drop(columns=['上传日期', '标签'])
print(df.shape)

sep('Check columns')
cols = df.columns
cols_num = df.describe().columns  # describe() covers numeric columns only
cols_obj = set(cols) - set(cols_num)
print(f'Columns: {cols}')
print(f'Columns numeric: {cols_num}')
print(f'Columns object: {cols_obj}')

sep('Wan related')

def process_wan(x):
    """Convert a count that may carry the Chinese '万' (10,000) suffix to a float.

    Accepts strings like '3.5万' (-> 35000.0) as well as plain numeric
    strings or numbers like '120' / 120 (-> 120.0).

    Raises ValueError if the remaining text is not a valid number.
    """
    # str() makes the function safe for cells that are already numeric;
    # the original crashed with TypeError on non-string input.
    s = str(x).strip()
    if s.endswith('万'):
        # Drop the suffix and scale by 10,000.
        return float(s[:-1]) * 1e4
    return float(s)


# Normalize the three count columns that may carry the '万' suffix,
# then refresh the numeric/object column partition.
for column in ('播放数', '收藏数', '弹幕数'):
    df[column] = df[column].map(process_wan)
cols_num = df.describe().columns
cols_obj = set(cols) - set(cols_num)
print(f'Columns numeric: {cols_num}')
print(f'Columns object: {cols_obj}')

sep('column 简介: jieba split, drop single/stop')
sep('ori')
print(df[:5]['简介'])
sep('split')
# Tokenize each synopsis with jieba in precise (non-full) mode.
df['简介'] = df['简介'].map(lambda text: jieba.lcut(text, cut_all=False))
print(df[:5]['简介'])
sep('drop single character')
# Single-character tokens carry little signal; keep words of length >= 2.
df['简介'] = df['简介'].map(lambda words: [w for w in words if len(w) > 1])
print(df[:5]['简介'])
sep('Load stop words')
# One stop word per line; the '\n' separator with the python engine reads
# the file as a single column.
stops = pd.read_csv(r'../../../../../large_data/ML2/stopwords1.txt',
                    sep=r'\n',
                    engine='python',
                    encoding='utf8',
                    header=None).iloc[:, 0]
print(stops[:5])
stops = set(stops)
sep('drop stop words')
df['简介'] = df['简介'].map(lambda words: [w for w in words if w not in stops])
print(df[:5]['简介'])

sep('tf-idf')
from sklearn.feature_extraction.text import TfidfVectorizer

# Tokens are runs of Latin letters or CJK characters.
vectorizer = TfidfVectorizer(token_pattern='[a-zA-Z\u4e00-\u9fa5]+')
# Re-join the token lists into space-separated strings for the vectorizer.
df['简介'] = df['简介'].map(' '.join)
x2 = vectorizer.fit_transform(df['简介'])
print(x2.shape)

sep('truncated svd')
from sklearn.decomposition import TruncatedSVD

# Reduce the sparse tf-idf matrix to 10 dense latent components.
x2 = TruncatedSVD(n_components=10).fit_transform(x2)
print(x2.shape)

sep('Get x and y')
# Hand-picked numeric engagement features; the SVD components in x2 are
# appended as extra predictors.
x1 = df[['弹幕数', '硬币数', '收藏数', '上传者投稿数', '上传者粉丝数', '评论数', '分享数']]
y = df[['播放数']]
x = np.c_[x1, x2]
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Split BEFORE scaling: fitting a scaler on the full dataset leaks
# test-set statistics into the features (data leakage).
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, random_state=666)
x_scaler = StandardScaler()
x_train = x_scaler.fit_transform(x_train)
x_test = x_scaler.transform(x_test)
# Separate scaler for the target, so predictions can be inverse-transformed
# back to raw play counts later; ravel() gives sklearn the expected 1-D y.
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train).ravel()
y_test = y_scaler.transform(y_test).ravel()

sep('Select model')
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso, Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor

sep('Lasso')
est01 = estimator = Lasso()
params = dict(alpha=[0.1, 0.5, 1, 5, 10])
# `iid` was deprecated in scikit-learn 0.22 and removed in 0.24;
# passing it raises TypeError on current releases.
grid = GridSearchCV(estimator, params, cv=5)
grid.fit(x_train, y_train)
print(f'Best score: {grid.best_score_}')
print(f'Best params: {grid.best_params_}')

sep('Ridge')
est02 = estimator = Ridge()
params = dict(alpha=[0.5, 1, 10, 15, 20])
# `iid` was removed in scikit-learn 0.24; no longer passed.
grid = GridSearchCV(estimator, params, cv=5)
grid.fit(x_train, y_train)
print(f'Best score: {grid.best_score_}')
print(f'Best params: {grid.best_params_}')

sep('DecisionTreeRegressor')
est03 = estimator = DecisionTreeRegressor()
params = dict(max_depth=[5, 10, 15])
# `iid` was removed in scikit-learn 0.24; no longer passed.
grid = GridSearchCV(estimator, params, cv=5)
grid.fit(x_train, y_train)
print(f'Best score: {grid.best_score_}')
print(f'Best params: {grid.best_params_}')

sep('KNeighborsRegressor')
est04 = estimator = KNeighborsRegressor()
params = dict(n_neighbors=[5, 10, 20])
# `iid` was removed in scikit-learn 0.24; no longer passed.
grid = GridSearchCV(estimator, params, cv=5)
grid.fit(x_train, y_train)
print(f'Best score: {grid.best_score_}')
print(f'Best params: {grid.best_params_}')

sep('RandomForestRegressor')
est05 = estimator = RandomForestRegressor()
params = dict(n_estimators=[5, 10, 20],
              max_depth=[5, 10, 15])
# `iid` was removed in scikit-learn 0.24; no longer passed.
grid = GridSearchCV(estimator, params, cv=5)
grid.fit(x_train, y_train)
print(f'Best score: {grid.best_score_}')
print(f'Best params: {grid.best_params_}')

sep('GradientBoostingRegressor')
est06 = estimator = GradientBoostingRegressor()
params = dict(n_estimators=[5, 10, 20],
              max_depth=[5, 10, 15])
# `iid` was removed in scikit-learn 0.24; no longer passed.
grid = GridSearchCV(estimator, params, cv=5)
grid.fit(x_train, y_train)
print(f'Best score: {grid.best_score_}')
print(f'Best params: {grid.best_params_}')

sep('Final model')
# Refit the best configuration found by the grid search on the training split.
model = RandomForestRegressor(n_estimators=10, max_depth=15)
model.fit(x_train, y_train)
print(f'Training score = {model.score(x_train, y_train)}')
print(f'Testing score = {model.score(x_test, y_test)}')
