import os
from nlp_tools.corpus.classify.competition import DataFoundClassify
from nlp_tools.processors.sequence_processor import SequenceProcessor
from nlp_tools.processors.classification.classification_label_processor import ClassificationLabelProcessor
from nlp_tools.embeddings.bare_embedding import BareEmbedding
from nlp_tools.embeddings.bert_embedding import BertEmbedding
from nlp_tools.tokenizer.whitespace_tokenizer import WhiteSpaceTokenizer


from nlp_tools.tasks.classification import BiLSTM_Model
from nlp_tools.tasks.classification.cnn_attention_model import CNN_Attention_Model
from nlp_tools.tasks.classification.dpcnn_model import DPCNN_Model
from nlp_tools.tasks.classification.cnn_lstm_model import CNN_LSTM_Model

import random
import numpy as np
import tensorflow as tf
def seed_tensorflow(seed=42):
    """Seed every RNG source (Python, hashing, NumPy, TensorFlow) for reproducibility.

    Args:
        seed: integer seed applied to all generators (default 42).
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Seed the array/tensor libraries with the same value.
    for seeder in (np.random.seed, tf.random.set_seed):
        seeder(seed)
    # Force deterministic TF kernels (requires: pip install tensorflow-determinism)
    os.environ['TF_DETERMINISTIC_OPS'] = '1'

# Seed all RNG sources up front so the whole run is reproducible.
seed_tensorflow(2021)

# Hard-coded local paths: preprocessed competition data and the pretrained
# BERT checkpoint directory. NOTE(review): machine-specific; consider making
# these CLI arguments or environment variables.
data_path = r'/home/qiufengfeng/nlp/competition/datagrand/基于大规模预训练模型的风险事件标签识别/processed'
bert_config_path = r'/home/qiufengfeng/nlp/pre_trained_model/datagrand/'

# Load the train/validation splits via the competition-specific loader.
train_data = DataFoundClassify.load_data(file_name=os.path.join(data_path,'train.csv'))
valid_data = DataFoundClassify.load_data(file_name=os.path.join(data_path,'valid.csv'))

# Whitespace tokenizer backed by the corpus vocabulary file.
text_tokenizer  = WhiteSpaceTokenizer(token_dict=os.path.join(data_path,'vocab.txt'))


# Maximum input sequence length (tokens) passed to the model constructor below.
MaxLine = 400


# Alternative: randomly initialized embedding sized to the tokenizer vocabulary.
#embedding  = BareEmbedding(embedding_size=768,vocab_size=len(text_tokenizer.token_dict))
# BERT embedding limited to 2 hidden layers (smaller/faster fine-tuning).
embedding = BertEmbedding(bert_config_path,num_hidden_layers=2)

# By default no tokenization/preprocessing of the training data is required;
# if it is, override text_tokenizer and the corresponding processor.
sequenceProcessor = SequenceProcessor(text_tokenizer=text_tokenizer)
labelProcessor = ClassificationLabelProcessor()
#model = CNN_LSTM_Model(embedding=embedding,text_processor=sequenceProcessor,label_processor=labelProcessor)
# NOTE(review): `train_sequece_length_as_max_sequence_length` reads like a
# boolean flag but is passed MaxLine (400) — confirm the expected type
# against the DPCNN_Model signature.
model = DPCNN_Model(embedding=embedding,text_processor=sequenceProcessor,label_processor=labelProcessor,max_sequence_length = MaxLine,train_sequece_length_as_max_sequence_length=MaxLine,use_rdrop=False,use_FGM=False)
#model = CNN_Attention_Model(embedding=embedding,text_processor=sequenceProcessor,label_processor=labelProcessor)

from nlp_tools.metrics.classification import F1CategoryCallback


# Callback evaluates F1 on valid_data during training and writes checkpoints
# into model_save_path.
model_save_path = 'train_model/'
f1_callback = F1CategoryCallback(model,model_save_path,valid_data)

# NOTE(review): AdamWeightDecay is imported but never referenced below —
# dead import that drags in the `transformers` dependency; confirm it is
# unused elsewhere before removing.
from transformers import AdamWeightDecay
model.fit(train_data,validate_data=valid_data,epochs=60,callbacks=[f1_callback],batch_size=32)
#model.save(model_save_path)


# cnn_attention 0.48811

