import numpy as np
import pandas as pd
from python_ai.common.xcommon import sep
import jieba

# Print every DataFrame column and keep each frame on a single line.
pd.set_option('display.max_columns', None, 'display.expand_frame_repr', False)

sep('load')
# Load the documentary play-count dataset, indexed by title ('片名').
df = pd.read_excel(r'../../../../../large_data/ML2/纪录片播放量.xls', index_col='片名')
print(len(df))
# Drop the upload-date ('上传日期') and tag ('标签') columns before modeling.
df.drop(labels=['上传日期', '标签'], axis=1, inplace=True)
columns = df.columns
# describe() only summarizes numeric columns, so its columns are the numeric subset.
columns_num = df.describe().columns
# Object (non-numeric) columns = all columns minus the numeric ones.
columns_obj = set(columns) - set(columns_num)
print(f'Columns: {columns}')
print(f'Columns numeric: {columns_num}')
print(f'Columns object: {columns_obj}')

sep('Wan related')


def process_wan(x):
    """Convert a Chinese count value to a plain float.

    Strings ending in '万' (1e4) or '亿' (1e8) are expanded, e.g.
    '3.5万' -> 35000.0; other strings are parsed as-is. Values that are
    already numeric (including NaN) pass through via float(x), so the
    function is safe on partially-converted columns.
    """
    # Already numeric (e.g. int/float/NaN from a mixed column): no parsing needed.
    if not isinstance(x, str):
        return float(x)
    x = x.strip()  # tolerate stray whitespace around the number
    if x.endswith('万'):
        return float(x[:-1]) * 1e4
    if x.endswith('亿'):
        return float(x[:-1]) * 1e8
    return float(x)


# columns_obj.remove('简介')
# columns_obj = columns_obj - set('简介')
# Exclude the synopsis column ('简介') — it is free text, handled separately below.
columns_obj = columns_obj - set(['简介'])  # ATTENTION set(list), not set(element)
print(f'Columns to process Wan: {columns_obj}')
# Convert each remaining object column (counts like '3.5万') to floats.
for col in columns_obj:
    df[col] = df[col].map(process_wan)
# Recompute the numeric/object split to confirm the conversion succeeded.
columns_num = df.describe().columns
columns_obj = set(columns) - set(columns_num)
print(f'Columns numeric: {columns_num}')
print(f'Columns object: {columns_obj}')

sep('简介: jieba split, drop single/stop')
# Tokenize each synopsis ('简介') with jieba in precise (non-full) mode.
df['简介'] = df['简介'].map(lambda x: jieba.cut(x, cut_all=False))
# Keep only tokens longer than one character (drops single chars and punctuation).
df['简介'] = df['简介'].map(lambda x: [ i for i in x if len(i) > 1])
# Load the stopword list: one word per line, no header.
# NOTE(review): sep=r'\n' is deprecated in newer pandas — consider header=None with
# the default reader or skip_blank_lines; verify against the installed pandas version.
stops = pd.read_csv(r'../../../../../large_data/ML2/stopwords1.txt',
                    header=None,
                    sep=r'\n',
                    engine='python',
                    encoding='utf8',
                    )
# Set membership makes the per-token stopword test O(1).
stops = set(stops[0])
df['简介'] = df['简介'].map(lambda x: [ i for i in x if i not in stops])

sep('简介: tf-idf')
# Re-join tokens with spaces: TfidfVectorizer expects whitespace-separated text.
df['简介'] = df['简介'].map(lambda x: ' '.join(x))
from sklearn.feature_extraction.text import TfidfVectorizer
# token_pattern keeps Latin letters and CJK ideographs (U+4E00–U+9FA5);
# norm=None disables per-document L2 normalization of the tf-idf rows.
tfidf_model = TfidfVectorizer(norm=None,
                              token_pattern='[a-zA-Z\u4e00-\u9fa5]+')
# Sparse matrix: one row per documentary, one column per vocabulary term.
x_jianjie_sp = tfidf_model.fit_transform(df['简介'])
# The raw text column is no longer needed as a feature.
del df['简介']
print(x_jianjie_sp.shape)

sep('简介: decomposition')
from sklearn.decomposition import TruncatedSVD
# Reduce the sparse tf-idf matrix to 10 dense latent components (works on
# sparse input, unlike PCA).
dc = TruncatedSVD(n_components=10)
x_jianjie = dc.fit_transform(x_jianjie_sp)
print(x_jianjie.shape)

sep('log(y)')
# Target is the play count ('播放数'); remove it from the feature frame.
y = df['播放数']
df.drop(labels='播放数', axis=1, inplace=True)
# Log-transform the heavily right-skewed play counts before regression.
y = np.log(y)
print(y[:5])

sep('std other columns')
from sklearn.preprocessing import StandardScaler
# Standardize the numeric features to zero mean / unit variance.
std = StandardScaler()
x = std.fit_transform(df)

sep('splice')
# Append the 10 SVD components of the synopsis to the scaled numeric features.
x = np.c_[x, x_jianjie]

sep('split into train/test')
from sklearn.model_selection import train_test_split
# 70/30 split; fixed random_state makes the split reproducible across runs.
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, random_state=666)

# Label fixed: the estimator below is Ridge (L2-regularized linear regression),
# not logistic regression — this is a regression task (log play counts).
sep('grid search (ridge regression)')
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
estimator = Ridge()
# Candidate L2 regularization strengths.
params = dict(alpha=[0.1, 0.5, 1, 2, 5, 10, 20, 50, 100])
grid = GridSearchCV(estimator, params, cv=5)
# Results cached from a previous run to skip the slow search;
# uncomment grid.fit below to recompute them.
grid.best_score_ = 0.3042600505890338
grid.best_params_ = {'alpha': 100}
# grid.fit(x_train, y_train)
print(f'grid.best_score_ = {grid.best_score_}')
print(f'grid.best_params_ = {grid.best_params_}')

sep('grid search (gbdt)')
from sklearn.ensemble import GradientBoostingRegressor
estimator = GradientBoostingRegressor()
# Grid over ensemble size and tree depth.
params = dict(n_estimators=[10, 100, 200],
              max_depth=[10, 20, 50])
grid = GridSearchCV(estimator, params, cv=5)
# Results cached from a previous run; uncomment grid.fit to recompute.
grid.best_score_ = 0.7972023041983274
grid.best_params_ = {'max_depth': 10, 'n_estimators': 100}
# grid.fit(x_train, y_train)
print(f'grid.best_score_ = {grid.best_score_}')
print(f'grid.best_params_ = {grid.best_params_}')

sep('grid search (forest)')
from sklearn.ensemble import RandomForestRegressor
estimator = RandomForestRegressor()
# Same grid as GBDT for comparability.
params = dict(n_estimators=[10, 100, 200],
              max_depth=[10, 20, 50])
grid = GridSearchCV(estimator, params, cv=5)
# Results cached from a previous run (best of the three models tried);
# uncomment grid.fit to recompute.
grid.best_score_ = 0.8530543603854436
grid.best_params_ = {'max_depth': 50, 'n_estimators': 200}
# grid.fit(x_train, y_train)
print(f'grid.best_score_ = {grid.best_score_}')
print(f'grid.best_params_ = {grid.best_params_}')

sep('make model and get score')
# Train the winning configuration from the forest grid search and report
# R^2 on both splits to gauge overfitting.
model = RandomForestRegressor(max_depth=50, n_estimators=200)
model.fit(x_train, y_train)
print(f'Training score: {model.score(x_train, y_train)}')
print(f'Testing score: {model.score(x_test, y_test)}')
