import os
import torch
from torch.utils.data import Dataset,DataLoader
from transformers import BertTokenizerFast
from pathlib import Path
from typing import List
from collections import namedtuple
# Target label set for classification.
cls_ = ["类别a","类别b"]
# Forward/backward label maps — used when preparing the dataset and when
# decoding predicted class ids back to names.
id2name = dict(enumerate(cls_))
name2id = {name: idx for idx, name in id2name.items()}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# One (text, label) sample record.
data = namedtuple("data",["txt","label"])
class YourDataSet(Dataset):
    """Line-per-sample text-classification dataset.

    Every file directly under *dir* whose stem (filename without suffix) is in
    the class list *c* is read line by line; each stripped line becomes one
    (txt, label) sample, the label coming from the module-level ``name2id`` map.

    Raises:
        ValueError: if *dir* does not exist or is not a directory.
    """
    def __init__(self, dir: str, c: List[str]):
        # Validate the dataset directory up front (guard clause).
        p = Path(dir)
        if not (p.exists() and p.is_dir()):
            raise ValueError(f"{dir} not found")
        self.filelist = [sub_p for sub_p in p.iterdir() if sub_p.is_file()]
        self.cls = c
        # In-memory sample cache.
        self.db: list = []
        self.processData(self.filelist)

    # Not a required override — kept separate so file loading stays out of __init__.
    def processData(self, flist: List[Path]):
        for f in flist:
            # BUGFIX: Path.stem is a property, not a method — the original
            # `f.stem()` raised "TypeError: 'str' object is not callable".
            stem = f.stem
            # Only files whose stem matches a known class name are loaded.
            if stem in self.cls:
                with open(f, "r", encoding="utf-8") as rf:
                    # One sample per line.
                    for line in rf:
                        self.db.append(data(txt=line.strip(), label=name2id[stem]))

    # Required override.
    def __getitem__(self, idx):
        return self.db[idx]

    def __len__(self):
        # Derive the length from the cache directly (the original stored it in
        # a misspelled `self.lenght` attribute).
        return len(self.db)
    
# The dataset is defined above. If the train/eval/test splits are not already in
# separate directories, pre-split them first. Below we mainly define `collate_fn`.
# NOTE(review): "./" loads the tokenizer files (vocab/config) from the current
# working directory — confirm they actually live there, or pass a model name/path.
tokenizer = BertTokenizerFast.from_pretrained("./")
def bertCollate(dl: List[data]):
    """Collate a list of (txt, label) samples into a model-ready batch.

    Returns:
        A tuple ``(inputs, labels)`` where ``inputs`` is the tokenizer's
        BatchEncoding moved to ``device`` and ``labels`` is a LongTensor on
        the same device.
    """
    txts = [d.txt for d in dl]
    labels = [d.label for d in dl]
    # truncation=True: without it, texts longer than max_length are NOT cut,
    # which later overflows BERT's position embeddings at model time.
    inputs = tokenizer(
        text=txts,
        padding="max_length",
        truncation=True,
        max_length=510,
        return_tensors="pt",
    )
    # BUGFIX: Tensor.to() is not in-place — the original discarded the results
    # of three .to(device) calls, leaving the batch on CPU. BatchEncoding.to()
    # moves every contained tensor and returns the encoding.
    inputs = inputs.to(device)
    return inputs, torch.LongTensor(labels).to(device)

# Build the loaders.
# BUGFIX: YourDataSet.__init__ requires the class list as its second argument —
# the original calls omitted it and raised TypeError at construction.
# NOTE(review): "testloader" is consumed by the *training* loop below; the name
# suggests a held-out test set — consider renaming it (e.g. trainloader).
testloader = DataLoader(YourDataSet("testdir", cls_), batch_size=32, shuffle=True, collate_fn=bertCollate)
evalloader = DataLoader(YourDataSet("evaldir", cls_), collate_fn=bertCollate)
# Define the model.
class yourmodel(torch.nn.Module):
    """Placeholder classifier.

    BUGFIX: the original inherited from ``torch.nn``, which is a *module*, not
    a class — that raises TypeError the moment the class statement executes.
    Subclass ``torch.nn.Module`` instead.

    NOTE(review): no layers and no forward() yet — the training loop below will
    fail (and AdamW will see an empty parameter list) until they are added.
    """
    def __init__(self):
        super().__init__()

model = yourmodel()
# NOTE(review): the placeholder model has no parameters — AdamW raises
# "optimizer got an empty parameter list" until real layers are added.
opt = torch.optim.AdamW(model.parameters())
loss_fn = torch.nn.functional.cross_entropy
# Training outer loop.
from torch.nn.utils import clip_grad_norm_
max_epoch = 20
for epoch in range(max_epoch):
    # --- train ---
    model.train()
    total_loss = 0.0
    # BUGFIX: the original reused `i` both as the epoch index and as the batch
    # counter, clobbering the epoch number inside the loop.
    n_batches = 0
    for inputs, labels in testloader:
        pred = model(**inputs)
        loss = loss_fn(pred, labels)
        total_loss += loss.item()
        n_batches += 1
        loss.backward()
        # Gradient-norm clipping (L2 norm by default) — keeps BERT fine-tuning
        # from exploding. (Original comment said "L1"; clip_grad_norm_ is L2.)
        clip_grad_norm_(model.parameters(), 1.0)
        opt.step()
        opt.zero_grad()
    # max(..., 1) guards against ZeroDivisionError on an empty loader.
    print(f"train loss is {total_loss / max(n_batches, 1)}")
    # --- evaluate ---
    model.eval()
    total_loss = 0.0
    n_batches = 0
    with torch.no_grad():
        for inputs, labels in evalloader:
            pred = model(**inputs)
            loss = loss_fn(pred, labels)
            total_loss += loss.item()
            n_batches += 1
    print(f"eval loss is {total_loss / max(n_batches, 1)}")



        




