import os

import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder  # encodes string labels as ints
from transformers import (
    MobileBertTokenizer,
    TFBertForSequenceClassification,
    TFMobileBertForSequenceClassification,
)
# NOTE: the vocabulary currently used is Google's mobilebert-uncased, which mainly
# suits English text. To handle Chinese text, use a Chinese vocabulary instead —
# e.g. "bert-base-chinese" from the HuggingFace transformers hub: simply change
# MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
# to MobileBertTokenizer.from_pretrained("bert-base-chinese").
# Path configuration
# CURRENT_PATH = "/CASR/textclassification/BERT"
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))  # directory containing this script
DATA_DIR = CURRENT_PATH + "/data/clean_data/"  # input .xlsx files (cleaned data)
MODEL_SAVE_PATH = CURRENT_PATH + "/models/saved_model/"  # output dir for model + tokenizer


# Load data
def load_data(data_dir):
    """Load texts and integer labels from every .xlsx file in *data_dir*.

    Each workbook must contain 'text' and 'label' columns. Rows missing
    either value are dropped so the tokenizer never receives NaN floats.
    Files that cannot be read (or lack the columns) are reported and skipped.

    Args:
        data_dir: Directory scanned (non-recursively) for .xlsx files.

    Returns:
        (texts, labels): texts is a list of str; labels is a list of ints
        assigned in sorted order of the distinct raw labels — the same
        deterministic mapping sklearn's LabelEncoder would produce, but
        without discarding the mapping object (the class order is
        reproducible at inference time by re-sorting the label names).
    """
    texts, labels = [], []
    for file in os.listdir(data_dir):
        if not file.endswith(".xlsx"):
            continue
        file_path = os.path.join(data_dir, file)
        try:
            df = pd.read_excel(file_path, engine="openpyxl")  # Specify the engine
            # Drop rows with missing text or label: a NaN float in `texts`
            # would crash the tokenizer downstream.
            df = df.dropna(subset=["text", "label"])
            texts.extend(df["text"].astype(str).tolist())
            labels.extend(df["label"].tolist())
        except Exception as e:
            print(f"Error reading file {file_path}: {e}")

    # Encode raw labels as integers. Sorting the distinct labels yields the
    # identical index assignment LabelEncoder.fit_transform produces
    # (np.unique sorts), with a plain dict instead of a throwaway encoder.
    class_to_index = {c: i for i, c in enumerate(sorted(set(labels)))}
    labels = [class_to_index[c] for c in labels]

    return texts, labels

# Data preprocessing
def preprocess_data(tokenizer, texts, labels, max_length=50):
    """Tokenize *texts* and wrap *labels* as an int32 tensor.

    Args:
        tokenizer: A HuggingFace tokenizer instance.
        texts: List of raw input strings.
        labels: List of integer class ids, parallel to *texts*.
        max_length: Sequences are padded/truncated to at most this length.

    Returns:
        (encodings, label_tensor): the tokenizer's TF-tensor batch encoding
        and a tf.int32 tensor of labels, ready for model.fit.
    """
    encodings = tokenizer(
        texts,
        max_length=max_length,
        padding=True,
        truncation=True,
        return_tensors="tf",
    )
    label_tensor = tf.constant(labels, dtype=tf.int32)
    return encodings, label_tensor

# Model training
def train_model():
    """Fine-tune a MobileBERT sequence classifier on the xlsx data in DATA_DIR.

    Loads texts/labels, splits 80/20 (fixed seed for reproducibility),
    tokenizes both splits, trains for one epoch, and saves the model and
    tokenizer to MODEL_SAVE_PATH.
    """
    texts, labels = load_data(DATA_DIR)
    # English vocabulary; for Chinese text load e.g. "bert-base-chinese" here.
    tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")

    # Split the dataset 80/20 with a fixed seed.
    train_texts, val_texts, train_labels, val_labels = train_test_split(
        texts, labels, test_size=0.2, random_state=42
    )

    train_inputs, train_labels = preprocess_data(tokenizer, train_texts, train_labels)
    val_inputs, val_labels = preprocess_data(tokenizer, val_texts, val_labels)

    # BUG FIX: the checkpoint is a MobileBERT model, so it must be loaded with
    # the MobileBERT head class. TFBertForSequenceClassification expects the
    # BERT architecture, so the checkpoint weights would not map onto it.
    model = TFMobileBertForSequenceClassification.from_pretrained(
        "google/mobilebert-uncased", num_labels=len(set(labels))  # one logit per class
    )
    optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5)
    # Model outputs raw logits, hence from_logits=True.
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])

    model.fit(
        dict(train_inputs),  # BatchEncoding -> plain dict of input tensors
        train_labels,
        validation_data=(dict(val_inputs), val_labels),
        epochs=1,
        batch_size=8,
    )

    # Save the fine-tuned weights and the tokenizer together so the saved
    # directory is self-contained for inference (the earlier duplicate
    # tokenizer save before training was redundant).
    model.save_pretrained(MODEL_SAVE_PATH)
    tokenizer.save_pretrained(MODEL_SAVE_PATH)
    print(f"Model saved to {MODEL_SAVE_PATH}")

if __name__ == "__main__":
    # Script entry point: run the full training pipeline.
    train_model()
