# tutorial: https://www.youtube.com/watch?v=GYDFBfx8Ts8
#%% Download the competition training data from Kaggle.
from kaggle.api.kaggle_api_extended import KaggleApi

kaggle_api = KaggleApi()
kaggle_api.authenticate()
kaggle_api.competition_download_file(
    'sentiment-analysis-on-movie-reviews',
    'train.tsv.zip',
    path='./',
)

## Extract the downloaded archive and load the training TSV.
import zipfile
import pandas as pd

with zipfile.ZipFile('train.tsv.zip', 'r') as archive:
    archive.extractall('./')

df = pd.read_csv('train.tsv', sep='\t')
df.head()

# Each sentence appears as many overlapping phrases; keep only the first
# (the full sentence) per SentenceId.
df.drop_duplicates(subset='SentenceId', keep='first', inplace=True)
df.head()
len(df)

#%% Inspect the distribution of per-phrase token counts (whitespace split)
# to justify the SEQ_LEN choice below.
seqlen = df['Phrase'].apply(lambda x: len(x.split()))

import seaborn as sns
import matplotlib.pyplot as plt

sns.set_style('darkgrid')
plt.figure(figsize=(16, 10))
# FIX: sns.distplot was deprecated in seaborn 0.11 and removed in 0.14;
# histplot(..., kde=True) is the supported equivalent plot.
sns.histplot(seqlen, kde=True)
plt.show()

#%% Fixed sequence length for padding/truncation, and the BERT tokenizer.
SEQ_LEN = 50
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')

#%% Example: encode one string into input ids + attention mask tensors.
tokens = tokenizer.encode_plus(
    'hello world',
    max_length=SEQ_LEN,
    truncation=True,
    padding='max_length',
    add_special_tokens=True,
    return_token_type_ids=False,
    return_attention_mask=True,
    return_tensors='tf',
)
tokens

#%% Tokenise every phrase into fixed-length id / attention-mask matrices.
import numpy as np

# FIX: token ids and masks are integers and the model's Input layers are
# declared dtype='int32', but np.zeros defaults to float64 — allocate
# int32 up front to avoid the implicit cast and halve the memory use.
Xids = np.zeros((len(df), SEQ_LEN), dtype='int32')    # token ids per phrase
Xmask = np.zeros((len(df), SEQ_LEN), dtype='int32')   # attention masks per phrase

for i, sequence in enumerate(df['Phrase']):
    tokens = tokenizer.encode_plus(
        sequence,
        max_length=SEQ_LEN,
        truncation=True,
        padding='max_length',
        add_special_tokens=True,
        return_token_type_ids=False,
        return_attention_mask=True,
        return_tensors='tf',
    )
    # encode_plus returns (1, SEQ_LEN) tensors; assignment flattens into row i.
    Xids[i, :] = tokens['input_ids']
    Xmask[i, :] = tokens['attention_mask']

Xids   # token id matrix
Xmask  # attention mask matrix

df['Sentiment'].unique()
#%% One-hot encode the sentiment labels (classes 0..4).
arr = df['Sentiment'].values
arr.size
# Row c of the identity matrix is the one-hot vector for class c, so
# fancy-indexing with arr builds the whole (n_samples, n_classes) matrix.
labels = np.eye(arr.max() + 1)[arr]
labels

# Persist the encoded matrices, then reload them to verify the round trip.
# np.save / np.load accept a filename directly, so explicit open() calls
# are unnecessary.
np.save('xids.npy', Xids)
np.save('xmask.npy', Xmask)
np.save('labels.npy', labels)

del Xids, Xmask, labels

Xids = np.load('xids.npy')
Xmask = np.load('xmask.npy')
labels = np.load('labels.npy')
Xids

#%% TensorFlow setup: list GPUs and wrap the arrays in a tf.data pipeline.
import tensorflow as tf

tf.config.experimental.list_physical_devices('GPU')
dataset = tf.data.Dataset.from_tensor_slices((Xids, Xmask, labels))
for sample in dataset.take(1):
    print(sample)

def map_func(input_ids, masks, labels):
    """Repack an (ids, mask, label) triple into the (features-dict, label)
    structure Keras expects; the dict keys must match the model's Input names."""
    features = {'input_ids': input_ids, 'attention_mask': masks}
    return features, labels
dataset = dataset.map(map_func)
for sample in dataset.take(1):
    print(sample)

# Shuffle, batch, then carve off the final 10% of batches for validation.
dataset = dataset.shuffle(100000).batch(32)
DS_LEN = len(list(dataset))

SPLIT = 0.9
n_train_batches = round(DS_LEN * SPLIT)
train = dataset.take(n_train_batches)
val = dataset.skip(n_train_batches)
del dataset
#%% Build the classifier: frozen BERT encoder + a small dense head.
from transformers import TFAutoModel

bert = TFAutoModel.from_pretrained('bert-base-cased')

# Freeze BERT so only the head is trained (transfer learning). Setting
# trainable on the bert object directly is robust, unlike the original
# `model.layers[2].trainable = False`, which silently freezes the wrong
# layer if the layer order ever changes.
bert.trainable = False

# BUG FIX: these Input names must match the dict keys produced by map_func
# ('input_ids' / 'attention_mask'). The original name='mask' meant
# model.fit could not match the dataset's 'attention_mask' feature to
# this input and training failed.
input_ids = tf.keras.layers.Input(shape=(SEQ_LEN,), name='input_ids', dtype='int32')
mask = tf.keras.layers.Input(shape=(SEQ_LEN,), name='attention_mask', dtype='int32')

# [0] selects last_hidden_state (batch, SEQ_LEN, hidden); the pooled
# output at index 1 is intentionally ignored.
embeddings = bert(input_ids, attention_mask=mask)[0]
x = tf.keras.layers.GlobalMaxPool1D()(embeddings)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Dense(128, activation='relu')(x)
x = tf.keras.layers.Dropout(0.1)(x)
x = tf.keras.layers.Dense(32, activation='relu')(x)
# 5 output classes: sentiment labels 0..4.
y = tf.keras.layers.Dense(5, activation='softmax', name='outputs')(x)

model = tf.keras.Model([input_ids, mask], y)
model.summary()

optimizer = tf.keras.optimizers.Adam(0.01)
loss = tf.keras.losses.CategoricalCrossentropy()
acc = tf.keras.metrics.CategoricalAccuracy('accuracy')

model.compile(optimizer=optimizer, loss=loss, metrics=[acc])
history = model.fit(train, validation_data=val, epochs=140)

