import pandas as pd
from datetime import datetime
from transformers import BertTokenizer
from nlpx.text_token.utils import batch_cut
from nlpx.text_token import TokenEmbedding, BaseTokenizer, SimpleTokenizer, PaddingTokenizer, Tokenizer, AutoTokenizer,\
	BosTokenizer, EosTokenizer


# Local model/embedding paths used only by the commented-out experiments below
# (BertTokenizer / TokenEmbedding); the live code path does not read them.
pretrained_path = r'/Users/summy/data/albert_small_zh'
pretrained_path2 = r'/Users/summy/data/shizhan/sgns.weibo.word.bz2'


if __name__ == '__main__':
	# Smoke-test script for the nlpx tokenizer family: builds vocabularies from a
	# local CSV of incident descriptions and round-trips two sample sentences
	# through encode/decode. Timestamps are printed to eyeball each step's cost.
	print('start', datetime.now())
	# NOTE(review): hard-coded local path + GBK encoding — only runs on the
	# author's machine; confirm the file exists before reusing this script.
	df = pd.read_csv('~/project/python/parttime/text_gcn/data/北方地区不安全事件统计20240331.csv', encoding='GBK')
	print('pd.read_csv', datetime.now())
	# Label-encoding experiment (kept for reference):
	# df['故障标志'] = df['故障标志'].astype('category')
	# classes = df['故障标志'].cat.categories.values.tolist()
	# df['故障标志'] = df['故障标志'].cat.codes
	# y = df['故障标志'].to_numpy()

	# Two held-out Chinese sentences used as encode/decode probes.
	test_texts = ['昆明航B737飞机执行昆明至西安航班 西安机场进近过程中  机组将飞机襟翼位置设置错误触发近地警告事件',
				  '海航 B737飞机执行汉中至广州航班 飞机起飞后发现汉中过站期间货舱有730KG货物未卸机']

	# HuggingFace baseline for comparison (kept for reference):
	# tokenizer = BertTokenizer.from_pretrained(pretrained_path)
	# print(tokenizer.batch_encode_plus(test_texts, max_length=50, padding='max_length',
	# 								  return_token_type_ids=False, return_attention_mask=False,
	# 								  truncation=True, add_special_tokens=True, return_tensors='pt')['input_ids'])

	# Alternative nlpx tokenizer constructions tried earlier:
	# tokenizer = TokenEmbedding(pretrained_path2)
	# tokenizer = BaseTokenizer(texts=df['故障描述'].values)
	# tokenizer = SimpleTokenizer(texts=df['故障描述'].values)
	# tokenizer = PaddingTokenizer(texts=df['故障描述'].values)
	# Timing probe: build a plain Tokenizer from the corpus (result is replaced
	# by the Bos/Eos variants below; kept to measure construction time).
	tokenizer = Tokenizer(texts=df['故障描述'].values)  # 134
	print('new Tokenizer', datetime.now())
	# Save/load and encode/decode experiments (kept for reference):
	# tokenizer.save()
	# tokenizer = AutoTokenizer.load()
	# print(type(tokenizer))
	# ids = tokenizer.encode_plus(test_texts[0], return_mask=True)
	# print('encode_plus', datetime.now())
	# print(ids)
	# ids = tokenizer.batch_encode_plus(test_texts, return_mask=True)
	# print('batch_encode_plus', datetime.now())
	# print(ids)
	# ids, sizes = tokenizer.padding(ids)
	# print(ids, sizes)
	# print(tokenizer.vocab[:10])
	# print(tokenizer.vocab_size)
	
	# tokenizer.save()
	# tokenizer = Tokenizer.load()
	# print(tokenizer.vocab[:10])
	# print(tokenizer.batch_encode(test_texts))
	# print(tokenizer.batch_encode(test_texts, max_length=20))
	#
	# ids = tokenizer.batch_encode(test_texts[0], max_length=50)
	# print(ids)
	# print(tokenizer.batch_decode(ids, return_sentence=True))
	
	# Char-level cut experiment (kept for reference):
	# lines = batch_cut(df['故障描述'], cut_type='char')
	# lines = filter(lambda x: '，' in x, lines)
	# print(list(lines))
	
	# Round-trip the probes through a BOS-prefixed tokenizer; min_freq=0 keeps
	# every token seen in the corpus in the vocabulary.
	tokenizer = BosTokenizer(texts=df['故障描述'].values, min_freq=0)  # 134
	ids = tokenizer.batch_encode(test_texts)
	print(tokenizer.batch_decode(ids, return_special_tokens=True, return_sentence=True))
	
	# Same round-trip with an EOS-suffixed tokenizer.
	tokenizer = EosTokenizer(texts=df['故障描述'].values, min_freq=0)  # 134
	ids = tokenizer.batch_encode(test_texts)
	print(tokenizer.batch_decode(ids, return_special_tokens=True, return_sentence=True))
