import pandas as pd
import json
from config import Bert_Config
from transformers import BertTokenizer
from tqdm import tqdm

def load_data(file_path):
    """
    Load a tab-separated corpus file into a DataFrame.

    Each non-empty line must be "text<TAB>label" where label is an
    integer; malformed lines (wrong tab count or non-integer label)
    are reported and skipped.

    Args:
        file_path: path to the UTF-8 encoded txt file.

    Returns:
        pd.DataFrame with columns ['text', 'label'].
    """
    data = []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            if not line.strip():
                continue  # skip blank lines
            try:
                text, label = line.strip().split('\t')
                data.append((text, int(label)))
            except ValueError:
                # ValueError covers both a bad tab count (unpacking fails)
                # and a non-integer label; anything else should propagate
                # instead of being silently swallowed.
                print(f"数据格式错误：{line}，跳过")
    return pd.DataFrame(data, columns=['text', 'label'])

def preprocess_and_save(df, tokenizer, output_path, max_len=128):
    """
    Tokenize every row with the BERT tokenizer and write one JSON object
    per line to `output_path` (JSON-lines, UTF-8).

    Each output line contains the keys:
        input_ids, attention_mask, origin_text, label.

    Args:
        df: DataFrame with 'text' and 'label' columns.
        tokenizer: object exposing `encode_plus` (a HuggingFace tokenizer).
        output_path: destination file path for the JSON-lines output.
        max_len: every sequence is padded/truncated to this length.
    """
    with open(output_path, 'w', encoding='utf-8') as fout:
        for idx, row in tqdm(df.iterrows(), total=len(df)):
            text = row['text']
            # iterrows() yields numpy scalars for an int64 column, and
            # json.dumps cannot serialize numpy.int64 — cast to plain int.
            label = int(row['label'])
            encoded = tokenizer.encode_plus(
                text,
                add_special_tokens=True,
                max_length=max_len,
                padding='max_length',   # pad every sample up to max_len
                truncation=True,
                return_attention_mask=True
            )
            sample = {
                "input_ids": encoded['input_ids'],
                "attention_mask": encoded['attention_mask'],
                "origin_text": text,
                "label": label
            }
            fout.write(json.dumps(sample, ensure_ascii=False) + '\n')

if __name__ == '__main__':
    config = Bert_Config()
    tokenizer = BertTokenizer.from_pretrained(config.bert_path)

    # Convert each split in turn: (raw txt path, output json path,
    # whether to print a preview of the loaded frame).
    splits = [
        (config.train_path, config.train_json_path, True),   # training set
        (config.test_path, config.test_json_path, False),    # test set
        (config.dev_path, config.dev_json_path, False),      # validation set
    ]
    for raw_path, json_path, show_preview in splits:
        split_df = load_data(raw_path)
        if show_preview:
            # Quick sanity check on the training data only.
            print(split_df.head())
        preprocess_and_save(split_df, tokenizer, json_path, config.pad_size)
