import pandas as pd
from sklearn.model_selection import train_test_split
from collections import Counter
# Counter tallies how many times each label occurs in the data.
# TODO: data loading and sanity checks
# Load the Excel data (expects a 'title' text column and an 'emotion' label column)
df = pd.read_excel("data.xlsx", engine='openpyxl')  # reading .xlsx requires the openpyxl package

# Inspect the data
# print(f"Total samples: {len(df)}")
print("标签分布:", Counter(df['emotion']))  # label distribution across the whole dataset
# print("Sample rows:\n", df.head(3))

# Drop rows missing the label or the text (critical before stratified splitting!)
df = df.dropna(subset=['emotion', 'title'])
# print(f"Samples remaining after dropping missing values: {len(df)}")

# TODO: data splitting
# Stratified sampling keeps the label distribution consistent across splits.
# First split: train (80%) + temp (20%)
train_df, temp_df = train_test_split(
    df,
    test_size=0.2,
    stratify=df['emotion'],  # preserve the label distribution
    random_state=42       # fixed seed for reproducibility
)

# Second split: halve temp into dev and test
# (each is 10% of the full dataset: 0.2 * 0.5 — not 15% as one might expect)
dev_df, test_df = train_test_split(
    temp_df,
    test_size=0.5,
    stratify=temp_df['emotion'],
    random_state=42
)

# Verify the label distribution of each split
print("\n=== 分割结果 ===")
print(f"Train集: {len(train_df)}条 | 分布: {Counter(train_df['emotion'])}")
print(f"Dev集:   {len(dev_df)}条 | 分布: {Counter(dev_df['emotion'])}")
print(f"Test集:  {len(test_df)}条 | 分布: {Counter(test_df['emotion'])}")

# Add a per-row token count to every split (BERT inputs are typically capped at 512 tokens).
# NOTE(review): this counts whitespace-separated chunks — for Chinese titles that is
# unlikely to match the subword count a BERT tokenizer produces; confirm against the
# actual tokenizer before relying on these numbers for truncation decisions.
for split_frame in (train_df, dev_df, test_df):
    split_frame['text_length'] = split_frame['title'].astype(str).str.split().str.len()

# Inspect the length distribution (25th / 50th / 75th percentiles per split)
print("\n文本长度分位数(25%,50%,75%)：")
print(f"Train: {train_df['text_length'].quantile([0.25, 0.5, 0.75])}")
print(f"Dev:   {dev_df['text_length'].quantile([0.25, 0.5, 0.75])}")
print(f"Test:  {test_df['text_length'].quantile([0.25, 0.5, 0.75])}")

# Persist each split as CSV; the utf-8-sig BOM keeps Chinese text readable in Excel.
named_splits = (("train", train_df), ("dev", dev_df), ("test", test_df))
for stem, frame in named_splits:
    frame.to_csv(f"{stem}.csv", index=False, encoding='utf-8-sig')

print("\n分割文件已保存：train.csv, dev.csv, test.csv")













