import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from python_ai.common.xcommon import sep
# Pandas display options: show every row/column, never truncate or wrap.
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_colwidth', 1000)
pd.set_option('display.expand_frame_repr', False)
# Matplotlib: font with CJK glyphs, and render the minus sign correctly.
plt.rcParams['font.sans-serif'] = ['Simhei']
plt.rcParams['axes.unicode_minus'] = False

plt.figure(figsize=(12, 6))
# Subplot grid: spr rows x spc columns; spn is the running subplot index.
spr, spc, spn = 1, 2, 0

sep('load')
# Load the documentary play-count dataset, indexed by title (片名).
df = pd.read_excel(r'..\..\..\..\..\large_data\ML2\纪录片播放量.xls',
                   index_col='片名')
print(len(df))
# Numeric columns are exactly those describe() reports on; the rest are object.
cols = set(df.columns)
num_cols = set(df.describe().columns)
obj_cols = cols - num_cols
print(f'Columns: {cols}')
print(f'Numeric Columns: {num_cols}')
print(f'Object Columns: {obj_cols}')
# Columns: title, upload date, plays, danmaku, coins, favorites,
# uploader submissions, uploader followers, comments, shares, tags, synopsis.

# Tags (标签) and upload date (上传日期) are not used as features.
df.drop(columns=['标签', '上传日期'], inplace=True)

sep('Wan')
# Normalize counts that use the Chinese unit "万" (10,000), e.g. "3.5万" -> 35000.


def f_wan(x):
    """Convert a raw count to a float.

    Values may be plain number strings ('123'), strings ending in the
    Chinese unit '万' (10,000; e.g. '3.5万' -> 35000.0), or values that
    are already numeric.

    Fixes two crashes in the original: `x[-1]` raised IndexError on an
    empty string and TypeError on non-string (already numeric) input.
    """
    if not isinstance(x, str):
        # Already numeric (int/float) — just coerce.
        return float(x)
    s = x.strip()
    if s.endswith('万'):
        return float(s[:-1]) * 1e4
    return float(s)


wan_cols = ['播放数', '弹幕数', '收藏数']
print(f'Wan columns: {wan_cols}')
# Normalize every count column that may carry the '万' unit.
for c in wan_cols:
    df[c] = df[c].apply(f_wan)
sep('After process WAN')
# Re-derive the numeric/object split now that counts are floats.
num_cols = set(df.describe().columns)
obj_cols = cols - num_cols
print(f'Numeric Columns: {num_cols}')
print(f'Object Columns: {obj_cols}')
# Density (KDE) plot of the play count in the first subplot slot.
spn += 1
ax = plt.subplot(spr, spc, spn)
df['播放数'].plot(kind='kde', ax=ax)

# ---- Preprocess the synopsis (简介) text ----
sep('split and drop single/stop')
import jieba
# Tokenize with jieba's accurate mode (lcut) and drop single-character
# tokens (mostly particles) in a single pass.
df['简介'] = df['简介'].map(
    lambda text: [w for w in jieba.lcut(text) if len(w) > 1])
# Remove stop words.
sep('stop words')
stop = pd.read_csv(r'..\..\..\..\..\large_data\ML2\stopwords1.txt',
                   header=None,
                   sep=r'\n',  # NOTE: r'\n' (or '\n\n') works better for one-column files, see x_4_stopwords.py
                   engine='python',
                   encoding='utf-8')
stop = set(stop[0])
df['简介'] = df['简介'].map(lambda words: [w for w in words if w not in stop])


sep('join 简介')
# Re-join tokens with spaces so TfidfVectorizer can consume plain strings.
df['简介'] = df['简介'].map(' '.join)

# ---- Word cloud for one title's processed synopsis ----
sep('word cloud')
s = df.loc["雷蒙德·布兰克的厨房秘密【全8集】", "简介"]
from wordcloud import WordCloud
cloud = WordCloud(background_color='white', width=400, height=400, margin=4)
img = cloud.generate(s)
# Render it in the second subplot slot, axes hidden.
spn += 1
ax = plt.subplot(spr, spc, spn)
plt.axis('off')
plt.imshow(img)

# ---- TF-IDF features from the synopsis text -> x1 ----
sep('tf-idf => x1')
from sklearn.feature_extraction.text import TfidfVectorizer
# Token pattern keeps Latin-letter words and CJK (\u4e00-\u9fa5) runs only.
tfidf = TfidfVectorizer(norm=None,
                        token_pattern='[a-zA-Z\u4e00-\u9fa5]+')
# NOTE: the result is a sparse matrix — it cannot be concatenated directly;
# use .toarray()/.A first (the TruncatedSVD step below avoids that need).
x1 = tfidf.fit_transform(df['简介'])
print('x1.shape', x1.shape)
df.drop(columns=['简介'], inplace=True)

# ---- Reduce the TF-IDF matrix to 10 latent dimensions ----
sep('dc x1')

# NOTE:
# https://stats.stackexchange.com/questions/239481/difference-between-scikit-learn-implementations-of-pca-and-truncatedsvd
# PCA is (truncated) SVD on data centered by per-feature mean subtraction;
# on already-centered data the two are identical. In practice TruncatedSVD
# is the choice for large sparse matrices, which cannot be centered without
# making memory usage explode.
#
# x1_m = x1.mean(axis=0)
# x1 -= x1_m

from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components=10)
x1 = svd.fit_transform(x1)
print('x1.shape', x1.shape)

# ---- Target: log of the play count (heavy right tail) ----
sep('log y')
y = np.log(df['播放数'])  # vectorized; same values as element-wise np.log
df.drop(columns=['播放数'], inplace=True)
print('df.shape', df.shape)

# ---- Standardize the numeric feature matrix ----
sep('scale x')
from sklearn.preprocessing import StandardScaler
x = StandardScaler().fit_transform(df.values)
print('x.shape', x.shape)

sep('concat x, x1 => x')
# Column-wise concatenation of scaled numeric features and SVD text features.
x = np.hstack([x, x1])
print('x.shape', x.shape)

sep('split train and test')
from sklearn.model_selection import train_test_split
# 70/30 split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(
    x, y, train_size=0.7, random_state=666)

sep('grid search (ridge)')
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
grid = GridSearchCV(Ridge(),
                    {'alpha': [0.1, 0.5, 1, 2, 5, 10, 100, 200]},
                    cv=5)
# HACK: the expensive fit is commented out; previously observed CV results
# are patched onto the (unfitted) grid object so the script runs fast.
grid.best_score_ = 0.2872804464085015
grid.best_params_ = {'alpha': 200}
# grid.fit(x_train, y_train)
print(f'grid.best_score_ = {grid.best_score_}')
print(f'grid.best_params_ = {grid.best_params_}')

sep('grid search (forest)')
from sklearn.ensemble import RandomForestRegressor
estimator = RandomForestRegressor()
param_grid = {'n_estimators': [10, 50, 100],
              'max_depth': [2, 4, 8]}
grid = GridSearchCV(estimator, param_grid, cv=5)
# HACK: fit skipped; previously observed CV results are patched on instead.
grid.best_score_ = 0.8489399805236365
grid.best_params_ = {'max_depth': 8, 'n_estimators': 100}
# grid.fit(x_train, y_train)
print(f'grid.best_score_ = {grid.best_score_}')
print(f'grid.best_params_ = {grid.best_params_}')

sep('best forest')
# Refit a forest with the best hyper-parameters on the training split.
best_rf = RandomForestRegressor(**grid.best_params_)
best_rf.fit(x_train, y_train)
print(f'Training score = {best_rf.score(x_train, y_train)}')
print(f'Testing score = {best_rf.score(x_test, y_test)}')

sep('grid search gbdt')
from sklearn.ensemble import GradientBoostingRegressor
estimator = GradientBoostingRegressor()
param_grid = {'n_estimators': [10, 50, 100],
              'max_depth': [2, 4, 8]}
grid = GridSearchCV(estimator, param_grid, cv=5)
# HACK: fit skipped; previously observed CV results are patched on instead.
grid.best_score_ = 0.8491163087406012
grid.best_params_ = {'max_depth': 4, 'n_estimators': 100}
# grid.fit(x_train, y_train)
print(f'grid.best_score_ = {grid.best_score_}')
print(f'grid.best_params_ = {grid.best_params_}')

sep('best gbdt')
# Refit a GBDT with the best hyper-parameters on the training split.
gbdt = GradientBoostingRegressor(**grid.best_params_)
gbdt.fit(x_train, y_train)
print(f'Training score = {gbdt.score(x_train, y_train)}')
print(f'Testing score = {gbdt.score(x_test, y_test)}')

# Finally show all drawings.
plt.show()
