#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2022/7/13 下午8:10
# @Author  : qiufengfeng
# @File    : text_generator_test.py
# @Description :
import os
from nlp_tools.corpus.classify.competition import DataFoundClassify
from nlp_tools.metrics.classification import F1CategoryCallback
from nlp_tools.tasks.classification import ClassificationCnn
from nlp_tools.tasks.classification.cnn_attention_model import CNN_Attention_Model
from nlp_tools.tasks.classification.cls_mlp_model import ClsMlpModel
from nlp_tools.tasks.classification.dpcnn_model import DPCNN_Model
from nlp_tools.tasks.classification.cnn_lstm_model import CNN_LSTM_Model
from nlp_tools.processors.sequence_processor import SequenceProcessor
from nlp_tools.processors.classification.classification_label_processor import ClassificationLabelProcessor
from nlp_tools.tokenizer.hugging_tokenizer import HuggingTokenizer
from nlp_tools.embeddings.hugginface.autoembedding import AutoEmbedding

from nlp_tools.callbacks.classification.f1score_save_callback import F1SaveCallback

import random
import numpy as np
import tensorflow as tf
def seed_tensorflow(seed=42):
    """Seed every RNG source (Python, NumPy, TensorFlow) for reproducible runs.

    Also sets TF_DETERMINISTIC_OPS=1 to request deterministic TensorFlow ops
    (may require: pip install tensorflow-determinism).
    """
    # Environment knobs first; the seeded libraries follow.
    os.environ['PYTHONHASHSEED'] = str(seed)
    os.environ['TF_DETERMINISTIC_OPS'] = '1'  # pip install tensorflow-determinism
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)

# ---------------------------------------------------------------------------
# Script body: smoke-test the sentiment-classification batching pipeline.
# Imports grouped here (formerly scattered mid-script) per PEP 8 ordering.
# ---------------------------------------------------------------------------
from tqdm import tqdm

from nlp_tools.generators import BatchGenerator

seed_tensorflow(2021)

# Where checkpoints / artifacts for this experiment would be written.
model_save_path = 'temp/sentiment'

# Dataset location: keep the original hard-coded path as the default, but
# allow overriding via an environment variable so the script runs on other
# machines without editing the source.
file_name = os.environ.get(
    "SENTIMENT_CSV",
    "/home/fanfanfeng/PycharmProjects/pythonProject/torch_study/sentiment.csv",
)

# HuggingFace model id used for both the tokenizer and the embedding weights.
bert_model_path = "hfl/chinese-bert-wwm-ext"

# Load the corpus and split into train/valid (max sequence length 32).
(train_data, valid_data) = DataFoundClassify(
    file_name, split_train_test=True, max_length=32
).load_data()

text_tokenizer = HuggingTokenizer(bert_model_path)
embedding = AutoEmbedding(bert_model_path, text_tokenizer.tokenizer.model_input_names)

# Binary sentiment labels mapped to integer ids.
label_list = ["0", "1"]
label_dict = {key: index for index, key in enumerate(label_list)}

# By default no extra tokenization/preprocessing of the training data is
# needed; to customize, override text_tokenizer and the matching processor.
sequence_processor = SequenceProcessor(text_tokenizer=text_tokenizer)
label_processor = ClassificationLabelProcessor(vocab2idx=label_dict)

train_generator = BatchGenerator(
    train_data,
    text_processor=sequence_processor,
    label_processor=label_processor,
    seq_length=32,
    batch_size=200,
    use_rdrop=False,
)

# Iterate once through the generator to verify batching works end to end.
for batch in tqdm(train_generator):
    print(batch)