
//% color="#FF8C00" iconWidth=50 iconHeight=40
namespace bert_text{

    // Toolbox section header ("加载数据" / data loading): a tag block only groups
    // the blocks below it in the Mind+ palette and emits no code.
    //% block="加载数据" blockType="tag"
    export function initmodel() {}
    //% block="初始化模块" blockType="command"
    export function init(parameter: any, block: any) {
        // Emit the Python import preamble required by every generated block below.
        // FIX: `from transformers import AdamW` is deprecated and has been removed
        // in recent transformers releases; torch.optim.AdamW is the recommended
        // drop-in replacement (same constructor signature used at the call site:
        // AdamW(model.parameters(), lr=learning_rate)).
        Generator.addImport(`import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertTokenizer, BertModel
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
from torch.optim import AdamW
`)
    }

    //% block="设置识别结果类别[CLASS]" blockType="command"
    //% CLASS.shadow="list" CLASS.defl="'现在立即右转','左转，请注意'"
    export function readcap1(parameter: any, block: any) {
        // Register the recognition class vocabulary exactly once, via the
        // generator's init section (keyed so repeated blocks don't duplicate it).
        const classList = parameter.CLASS.code;
        Generator.addInit('classes_vocabulary', `classes_vocabulary = ${classList}`)
    }
    //% block="设置测试数据[CLASS]" blockType="command"
    //% CLASS.shadow="list" CLASS.defl="'停车', '左转', '右转'"
    export function readcapq(parameter: any, block: any) {
        // Sentences to be classified by the prediction block; stored under a
        // distinct init key so it cannot clash with the vocabulary init entry.
        const sentences = parameter.CLASS.code;
        Generator.addInit('classes_vocabulary1', `test_sentences = ${sentences}`)
    }
    //% block="加载[PATH]文件夹中的训练文件" blockType="command"
    //% PATH.shadow="string" PATH.defl="data"
    export function readcap1a(parameter: any, block: any) {
        // PATH.code comes from a string shadow and already carries its own
        // quoting, so it is interpolated verbatim into the Python assignment.
        let path = parameter.PATH.code;

        // Generated Python: scan <data_dir> for *.txt files (file stem = class
        // name, one file per class) and build parallel phrase/label lists.
        // FIX: load_data previously ignored its `classes` parameter and iterated
        // the global classes_vocabulary instead; it now honours the argument
        // (the call site still passes classes_vocabulary, so behavior for the
        // existing caller is unchanged).
        Generator.addCode(`data_dir = ${path}
data_files = {}
def load_data(data_dir, data_files, classes):
    words_list = []
    labels_list = []
    for idx, cls in enumerate(classes):
        file_path = os.path.join(data_dir, data_files[cls])
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                phrase = line.strip()
                if phrase:  # 确保行不为空
                    words_list.append(phrase)
                    labels_list.append(idx)
    return words_list, labels_list
# 自动识别 data 目录中的文件并添加到 data_files 中
for file_name in os.listdir(data_dir):
    if file_name.endswith('.txt'):
        class_name = file_name.split('.')[0]
        if class_name not in data_files:
            data_files[class_name] = file_name
words_list,labels_list =load_data(data_dir, data_files, classes_vocabulary)`)
    }



 
        // Toolbox section header ("模型参数" / model parameters): emits no code.
        //% block="模型参数" blockType="tag"
        export function initmodel1() {}
    //% block="设置模型参数 卷积核大小[BG] 卷积核数量[NUM]" blockType="command"
    //% BG.shadow="list" BG.defl=2,3,4
    //% NUM.shadow="normal" NUM.defl=128
    export function cv_collect_set_winname(parameter: any, block: any) {
        // TextCNN hyper-parameters for the generated model; embedding_dim is
        // fixed at 768, the hidden size of the bert-base encoder loaded later.
        const kernelSizes = parameter.BG.code;
        const filterCount = parameter.NUM.code;
        Generator.addCode(`embedding_dim = 768  
num_classes = len(classes_vocabulary)
kernel_sizes = ${kernelSizes} 
num_filters = ${filterCount}\n`)
    }

    //% block="设置训练参数 批次大小[BATCH] 学习速率[LR] 周期数[EPOCHS] 验证集与测试集的比例[SIZE]" blockType="command"
    //% BATCH.shadow="normal" BATCH.defl=16
    //% LR.shadow="normal" LR.defl=0.001
    //% EPOCHS.shadow="normal" EPOCHS.defl=30
    //% SIZE.shadow="normal" SIZE.defl=0.2
    export function train_test_split(parameter: any, block: any) {
        // Emit training hyper-parameters plus a two-stage stratified split:
        // first carve out a val+test pool of size val_test_size, then halve
        // that pool into validation and test sets with a fixed seed.
        // NOTE(review): the first split's remaining portion is discarded (`_`)
        // and the training block later encodes the FULL words_list, so the
        // val/test sentences overlap the training data — confirm this is the
        // intended (toy/teaching) behavior.
        const batchSize = parameter.BATCH.code;
        const learnRate = parameter.LR.code;
        const epochCount = parameter.EPOCHS.code;
        const holdout = parameter.SIZE.code;
        Generator.addCode(`batch_size = ${batchSize}
learning_rate = ${learnRate}
num_epochs = ${epochCount}
val_test_size = ${holdout}
random_seed = 42
torch.manual_seed(random_seed)
words_val_test, _, labels_val_test, _ = train_test_split(words_list, labels_list, train_size=val_test_size, random_state=random_seed, stratify=labels_list)

words_val, words_test, labels_val, labels_test = train_test_split(words_val_test, labels_val_test, test_size=0.5, random_state=random_seed, stratify=labels_val_test)
`)
    }
//     //% block="数据和标签进行随机划分" blockType="command"
//     export function init_queue0(parameter: any, block: any) {
//         Generator.addCode(`words_val_test, _, labels_val_test, _ = train_test_split(words_list, labels_list, train_size=val_test_size, random_state=random_seed, stratify=labels_list)

// words_val, words_test, labels_val, labels_test = train_test_split(words_val_test, labels_val_test, test_size=0.5, random_state=random_seed, stratify=labels_val_test)`)
//        }
        // Toolbox section header ("模型训练" / model training): emits no code.
        //% block="模型训练" blockType="tag"
        export function initmodela() {}
    // Loads the pretrained BERT tokenizer+encoder and defines/instantiates the
    // TextCNN classifier in the generated Python script.
    //% block="加载预训练模型路径[PATH] 选择[DEVICE]" blockType="command"
    //% PATH.shadow="string" PATH.defl="bert-base-chinese"
    //% DEVICE.shadow="dropdown" DEVICE.options="DEVICE"
    export function record_queue(parameter: any, block: any) {
        // PATH.code comes pre-quoted from the string shadow; DEVICE.code is the
        // raw dropdown value and is wrapped in quotes inside the payload below
        // (presumably 'cpu'/'cuda' — the DEVICE options list is not visible here).
        let path=parameter.PATH.code;
        let device=parameter.DEVICE.code;
        // Generated Python: load tokenizer/encoder from local_model_path, move
        // the encoder to the chosen device, then define TextCNN: one Conv2d per
        // kernel size over the (seq_length, embedding_dim) "image", ReLU +
        // max-pool each feature map, concatenate, and classify with a linear head.
        // Relies on embedding_dim/num_classes/kernel_sizes/num_filters emitted
        // by the "设置模型参数" block.
        Generator.addCode(`\nlocal_model_path = ${path} 
tokenizer = BertTokenizer.from_pretrained(local_model_path)
bert_model = BertModel.from_pretrained(local_model_path)

device = torch.device('${device}')  
bert_model.to(device)

class TextCNN(nn.Module):
    def __init__(self, embedding_dim, num_classes, kernel_sizes, num_filters):
        super(TextCNN, self).__init__()
        self.convs = nn.ModuleList([
            nn.Conv2d(in_channels=1,
                      out_channels=num_filters,
                      kernel_size=(k, embedding_dim)) for k in kernel_sizes
        ])
        self.fc = nn.Linear(num_filters * len(kernel_sizes), num_classes)

    def forward(self, x):
        x = x.unsqueeze(1)  # (batch_size, 1, seq_length, embedding_dim)
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs]  # [(batch_size, num_filters, seq_length), ...]
        x = [F.max_pool1d(item, item.size(2)).squeeze(2) for item in x]  # [(batch_size, num_filters), ...]
        x = torch.cat(x, 1)  # (batch_size, num_filters * len(kernel_sizes))
        logits = self.fc(x)
        return logits

model = TextCNN(
    embedding_dim=embedding_dim,
    num_classes=num_classes,
    kernel_sizes=kernel_sizes,
    num_filters=num_filters
)
model.to(device)


`)
    }



    // Emits the full train+validate loop for the generated script.
    //% block="训练与验证模型" blockType="command"
    export function init_queue(parameter: any, block: any) {
        // Generated Python: encode sentences with frozen BERT (no_grad, so only
        // the TextCNN head is trained), build TensorDatasets/DataLoaders, then
        // run num_epochs of CrossEntropy/AdamW training with a per-epoch
        // validation-accuracy printout.
        // NOTE(review): embeddings_train is built from the FULL words_list while
        // words_val/words_test come from a split of that same list, so the
        // validation/test sets overlap the training data — confirm intended.
        // NOTE(review): all sentences are encoded in a single tokenizer/BERT
        // call, which can exhaust memory on large datasets.
        Generator.addCode(`
def encode_sentences(sentences):
    inputs = tokenizer(sentences, return_tensors='pt', padding=True, truncation=True, max_length=50)
    inputs = {key: value.to(device) for key, value in inputs.items()}
    with torch.no_grad():
        outputs = bert_model(**inputs)
    embeddings = outputs.last_hidden_state  # (batch_size, seq_length, hidden_size)
    return embeddings

embeddings_train = encode_sentences(words_list)
embeddings_val = encode_sentences(words_val)
embeddings_test = encode_sentences(words_test)

labels_train_tensor = torch.tensor(labels_list).to(device)
labels_val_tensor = torch.tensor(labels_val).to(device)
labels_test_tensor = torch.tensor(labels_test).to(device)

train_dataset = TensorDataset(embeddings_train, labels_train_tensor)
val_dataset = TensorDataset(embeddings_val, labels_val_tensor)
test_dataset = TensorDataset(embeddings_test, labels_test_tensor)

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size)
test_loader = DataLoader(test_dataset, batch_size=batch_size)

criterion = nn.CrossEntropyLoss()
optimizer = AdamW(model.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for batch_embeddings, batch_labels in train_loader:
        optimizer.zero_grad()
        outputs = model(batch_embeddings)
        loss = criterion(outputs, batch_labels)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    avg_train_loss = total_loss / len(train_loader)

    # 验证模型
    model.eval()
    total_correct = 0
    total_samples = 0
    with torch.no_grad():
        for val_embeddings, val_labels in val_loader:
            val_outputs = model(val_embeddings)
            _, predicted = torch.max(val_outputs.data, 1)
            total_samples += val_labels.size(0)
            total_correct += (predicted == val_labels).sum().item()
    val_accuracy = total_correct / total_samples
    print(f'Epoch [{epoch+1}/{num_epochs}], Train Loss: {avg_train_loss:.4f}, Val Accuracy: {val_accuracy:.4f}')`)
    }
        // Toolbox section header ("模型操作" / model operations): emits no code.
        //% block="模型操作" blockType="tag"
        export function initmodela1() {}
    // Emits held-out-set evaluation: computes accuracy over test_loader into
    // the Python global `test_accuracy` (read back by the reporter block below).
    //% block="测试集评估模型指令" blockType="command"
    export function init_queue1(parameter: any, block: any) {
        Generator.addCode(`model.eval()
total_correct = 0
total_samples = 0
with torch.no_grad():
    for test_embeddings, test_labels in test_loader:
        test_outputs = model(test_embeddings)
        _, predicted = torch.max(test_outputs.data, 1)
        total_samples += test_labels.size(0)
        total_correct += (predicted == test_labels).sum().item()
test_accuracy = total_correct / total_samples`)     
    }
    //% block="获取评估模型测试准确率" blockType="reporter"
    export function get_queue(parameter: any, block: any) {
        // Reporter block: injects the Python variable produced by the
        // test-set evaluation command block.
        const accuracyExpr = `test_accuracy`;
        Generator.addCode(accuracyExpr)
    }
// Encodes test_sentences (set by the "设置测试数据" block) with BERT and runs the
// trained TextCNN over them, collecting (sentence, predicted_class) pairs into
// the Python global `result_list` for the reporter block below.
// NOTE(review): this redefines encode_sentences identically to the training
// block so the prediction block also works standalone after loading a model.
//% block="对测试数据进行编码并获取嵌入并预测" blockType="command"
export function init_queue2(parameter: any, block: any) {
    Generator.addCode(`
def encode_sentences(sentences):
    inputs = tokenizer(sentences, return_tensors='pt', padding=True, truncation=True, max_length=50)
    inputs = {key: value.to(device) for key, value in inputs.items()}
    with torch.no_grad():
        outputs = bert_model(**inputs)
    embeddings = outputs.last_hidden_state  # (batch_size, seq_length, hidden_size)
    return embeddings
result_list = []
with torch.no_grad():
    embeddings = encode_sentences(test_sentences)
    outputs = model(embeddings)
    predictions = torch.argmax(outputs, dim=1)
    for sentence, prediction in zip(test_sentences, predictions):
        result_list.append((sentence, classes_vocabulary[prediction.item()]))
`)


}     
//% block="预测第[NUM]个句子的结果" blockType="reporter"
//% NUM.shadow="number" NUM.defl=0
export function init_queue3(parameter: any, block: any) {
    // FIX: the NUM decorator was truncated ("NUM.def"), leaving the shadow
    // without a default; restored as NUM.defl=0.
    // NUM indexes result_list 0-based, while the block label says "第NUM个"
    // (the NUM-th) — callers may expect 1-based indexing; TODO confirm the
    // intended convention before changing the emitted expression.
    let num = parameter.NUM.code;

    // result_list entries are (sentence, predicted_class); [1] is the label.
    Generator.addCode(`result_list[${num}][1]`)
}
            // Toolbox separator tag ("---"): visual divider only, emits no code.
            //% block="---" blockType="tag"
            export function initmodela11() {}
    //% block="保存模型路径[PATH]" blockType="command"
    //% PATH.shadow="string" PATH.defl="bert_text_model.pth"
    export function initmodel2(parameter: any, block: any) {
        // Persist only the learned weights (state_dict), not the full module —
        // the load block reconstructs TextCNN before restoring the weights.
        const savePath = parameter.PATH.code;
        Generator.addCode(`model_save_path =${savePath}  
torch.save(model.state_dict(), model_save_path)`)
    }

    //% block="加载模型路径[PATH]" blockType="command"
    //% PATH.shadow="string" PATH.defl="bert_text_model.pth"
    export function initmodel3(parameter: any, block: any) {
        // Restore saved weights onto the already-constructed TextCNN and switch
        // to inference mode; map_location keeps CPU-only machines working even
        // if the checkpoint was saved from a GPU run.
        const loadPath = parameter.PATH.code;
        Generator.addCode(`
model_save_path = ${loadPath}  # 与之前保存的模型路径一致
model.load_state_dict(torch.load(model_save_path, map_location=device))
model.to(device)
model.eval()`)
    }

 
}