from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from datasets import load_from_disk, load_metric
import evaluate
from transformers import TrainingArguments, Trainer
from transformers.data.data_collator import DataCollatorWithPadding

from nlpx.model import TextCNN
from nlpx.text_token import PaddingTokenizer


# Class labels; index in this list is the integer label id.
# NOTE(review): these look like CEFR proficiency levels — confirm against the dataset.
CLASSES = ['A1', 'A2', 'B1', 'B2', 'C1', 'C2']


class Model(nn.Module):
	"""Embedding + TextCNN text classifier.

	``forward`` returns a ``(loss, logits)`` tuple so the model can be
	trained directly by the HuggingFace ``Trainer`` used in this file.
	"""
	
	def __init__(self, embed_dim: int, vocab_size: int, num_classes: int):
		super().__init__()
		self.embedding = nn.Embedding(vocab_size, embed_dim)
		self.text_cnn = TextCNN(embed_dim, out_features=num_classes)
	
	def forward(self, input_ids: torch.Tensor, labels: torch.LongTensor):
		# Look up token embeddings, classify, and compute the training loss.
		embedded = self.embedding(input_ids)
		logits = self.text_cnn(embedded)
		loss = F.cross_entropy(logits, labels)
		return loss, logits
	
	
class Collator:
	"""Batch collator: pads variable-length token-id sequences to a common
	length and packs them, with their labels, into tensors for the Trainer.

	The pad length is the longest sequence in the batch, optionally capped
	by ``max_length`` when it is a positive integer.
	"""
	
	def __init__(self, tokenizer, max_length: Optional[int] = None):
		self.tokenizer = tokenizer
		self.max_length = max_length
	
	def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
		ids = [feature['input_ids'] for feature in features]
		labels = [feature['label'] for feature in features]
		# Dynamic padding: pad only as far as this batch requires.
		target_len = max(len(seq) for seq in ids)
		if self.max_length and self.max_length > 0:
			target_len = min(target_len, self.max_length)
		padded = self.tokenizer.pad(ids, target_len)
		return {
			'input_ids': torch.LongTensor(padded),
			'labels': torch.LongTensor(np.array(labels)),
		}
		
	
if __name__ == '__main__':
	# Load the pre-processed dataset from the local 'data' directory;
	# expects 'train' and 'test' splits with 'text' and 'label' columns.
	dataset = load_from_disk('data')
	texts = dataset['train']['text'] + dataset['test']['text']
	
	# NOTE(review): range(1) yields only '0' — if all digits were meant to be
	# stop words this was probably intended as range(10); confirm intent.
	stop_words = [str(i) for i in range(1)]
	stop_words.extend(['-', '的', '一', '是', '了', '年', '月', '日'])
	# Build a character-level vocabulary over the full corpus, dropping
	# tokens seen fewer than 5 times and the stop words above.
	tokenizer = PaddingTokenizer.from_texts(texts, min_freq=5, cut_type='char', stop_words=stop_words, word_freq=False)
	
	def process_fn(data):
		# Encode raw text into token ids; padding is deferred to the Collator
		# so each batch is padded only to its own longest sequence.
		return {'input_ids': tokenizer.batch_encode(data['text'], padding=False)}
	
	dataset = dataset.map(process_fn, batched=True, batch_size=64, num_proc=2, remove_columns=['text'])
	# print(dataset)
	
	# metric = load_metric('accuracy')  # deprecated
	metric = evaluate.load('accuracy')
	
	def compute_metrics(eval_pred):
		# eval_pred is a (logits, labels) pair; reduce logits to the
		# arg-max predicted class before scoring accuracy.
		logits, labels = eval_pred
		logits = logits.argmax(axis=1)
		return metric.compute(predictions=logits, references=labels)
	
	args = TrainingArguments(
		# Directory for checkpoints and trainer state.
		output_dir='./output_dir',
		max_steps=100,
		# Learning rate.
		learning_rate=1e-3,
		# Weight decay to mitigate overfitting.
		weight_decay=1e-2,
		# Per-device batch sizes for evaluation and training.
		per_device_eval_batch_size=64,
		per_device_train_batch_size=64,
	)
	
	model = Model(128, vocab_size=tokenizer.vocab_size, num_classes=len(CLASSES))
	
	trainer = Trainer(
		model=model,
		args=args,
		train_dataset=dataset['train'],
		eval_dataset=dataset['test'],
		compute_metrics=compute_metrics,
		data_collator=Collator(tokenizer),
	)
	trainer.train()
	print(trainer.evaluate())
	
	# wandb.errors.UsageError: api_key not configured (no-tty). call wandb.login(key=[your_api_key])
	# pip uninstall wandb

