import os
import time

import torch
from datasets import load_dataset
from safetensors.torch import save_file
from transformers import BertTokenizer, BertModel


# Chinese text classification (binary) by fine-tuning a linear head on BERT.

start_time = time.time()  # record start time to report total runtime at the end

# 定义数据集
# Dataset wrapper: exposes the local HuggingFace split as (text, label) pairs.
class Dataset(torch.utils.data.Dataset):
  """Map-style dataset over a local HuggingFace dataset directory."""

  def __init__(self, split):
    # Load the requested split (e.g. 'train', 'validation') from ./datasets.
    self.dataset = load_dataset(path='datasets', split=split)

  def __len__(self):
    return len(self.dataset)

  def __getitem__(self, i):
    # Single lookup per item; return the raw text and its integer label.
    record = self.dataset[i]
    return record['text'], record['label']

# Select the compute device: prefer CUDA, then Apple MPS, else CPU.
# The original unconditionally defaulted to "mps", which crashes at the first
# .to(device) on machines without Metal support; check availability instead.
device = torch.device("cpu")
if torch.cuda.is_available():
  print("CUDA is available! You can use GPU acceleration.")
  device = torch.device("cuda")
elif torch.backends.mps.is_available():
  device = torch.device("mps")

# Report the selected device
print(f"Using device: {device}")

# Training split; each item is a (text, label) pair.
dataset = Dataset('train')

# len(dataset), dataset[0]

# Load the vocabulary/tokenizer from the local Chinese BERT checkpoint.
token = BertTokenizer.from_pretrained('model/bert-base-chinese')


def collate_fn(data):
  """Tokenize a batch of (text, label) pairs and move tensors to `device`.

  Returns (input_ids, attention_mask, token_type_ids, labels).
  """
  texts = [pair[0] for pair in data]
  raw_labels = [pair[1] for pair in data]
  # Encode: truncate/pad every sentence to exactly 500 tokens.
  encoded = token.batch_encode_plus(batch_text_or_text_pairs=texts,
                                    truncation=True,
                                    padding='max_length',
                                    max_length=500,
                                    return_tensors='pt',
                                    return_length=True)
  # input_ids: token ids after encoding.
  # attention_mask: 0 at padded positions, 1 everywhere else.
  input_ids = encoded['input_ids'].to(device)
  attention_mask = encoded['attention_mask'].to(device)
  token_type_ids = encoded['token_type_ids'].to(device)
  labels = torch.LongTensor(raw_labels).to(device)
  #print(encoded['length'], encoded['length'].max())
  return input_ids, attention_mask, token_type_ids, labels


# Data loader: shuffled batches of 16, dropping the last incomplete batch.
loader = torch.utils.data.DataLoader(dataset=dataset,
                                     collate_fn=collate_fn,
                                     batch_size=16,
                                     shuffle=True,
                                     drop_last=True)

# Pull a single batch to sanity-check tensor shapes before building the model.
input_ids, attention_mask, token_type_ids, labels = next(iter(loader))
print(len(loader))
print(input_ids.shape, attention_mask.shape, token_type_ids.shape, labels)

# Load the pretrained Chinese BERT encoder from the local checkpoint.
pretrained = BertModel.from_pretrained('model/bert-base-chinese')
pretrained.to(device)
# Freeze the whole encoder: it serves as a fixed feature extractor,
# so no gradients are needed for any of its parameters.
pretrained.requires_grad_(False)
# Trial forward pass on the sanity-check batch.
out = pretrained(input_ids=input_ids,
                 attention_mask=attention_mask,
                 token_type_ids=token_type_ids)
# Expect (16, 500, 768): 16 sentences, 500 tokens each, 768-dim per token.
print(out.last_hidden_state.shape)


# 定义下游任务模型
class Model(torch.nn.Module):
  def __init__(self):
    super().__init__()
    self.fc = torch.nn.Linear(768, 2)

  def forward(self, input_ids, attention_mask, token_type_ids):
    with torch.no_grad():
      out = pretrained(input_ids=input_ids,
                       attention_mask=attention_mask,
                       token_type_ids=token_type_ids)
    out = self.fc(out.last_hidden_state[:, 0])
    out = out.softmax(dim=1)
    return out


model = Model()
model.to(device)
# Output shape would be (16, 2): 16 sentences, 2 classes.
# print(model(input_ids=input_ids,
#      attention_mask=attention_mask,
#      token_type_ids=token_type_ids).shape)

# Train only the classification head (the encoder is frozen above).
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)
criterion = torch.nn.CrossEntropyLoss()
model.train()
for i, (input_ids, attention_mask, token_type_ids,
        labels) in enumerate(loader):
  out = model(input_ids=input_ids,
              attention_mask=attention_mask,
              token_type_ids=token_type_ids)
  loss = criterion(out, labels)
  loss.backward()
  optimizer.step()
  optimizer.zero_grad()
  if i % 5 == 0:
    # Batch accuracy as a cheap progress signal every 5 steps.
    preds = out.argmax(dim=1)
    accuracy = (preds == labels).sum().item() / len(labels)
    print(i, loss.item(), accuracy)
  if i == 300:
    # Cap training at 301 batches.
    break

# save_file does not create missing directories; ensure the target exists
# so the run does not crash after training completes.
os.makedirs('model/fine_tuned_bert', exist_ok=True)
# Saved in safetensors format (replaces torch.save to pytorch_model.bin):
# torch.save(model.state_dict(), 'model/fine_tuned_bert/pytorch_model.bin')
save_file(model.state_dict(), 'model/fine_tuned_bert/model.safetensors')


# 测试
def test():
  """Evaluate the model on 5 random validation batches and print accuracy."""
  model.eval()
  hits = 0
  seen = 0

  # Validation loader: shuffled so the 5 evaluated batches are a random sample.
  loader_test = torch.utils.data.DataLoader(dataset=Dataset('validation'),
                                            collate_fn=collate_fn,
                                            batch_size=32,
                                            shuffle=True,
                                            drop_last=True)

  for step, batch in enumerate(loader_test):
    if step == 5:
      break
    print(step)
    input_ids, attention_mask, token_type_ids, labels = batch
    with torch.no_grad():
      out = model(input_ids=input_ids,
                  attention_mask=attention_mask,
                  token_type_ids=token_type_ids)
    preds = out.argmax(dim=1)
    hits += (preds == labels).sum().item()
    seen += len(labels)
  print(hits / seen)


test()

end_time = time.time()  # record end time
elapsed_time = end_time - start_time  # total wall-clock runtime in seconds

print(f"代码执行时间：{elapsed_time:.6f} 秒")