import pandas as pd
import os


def analyze_facial_attributes(file_path, source_name, analyze_dir):
    """Analyze the label distribution of the last column of a feature CSV.

    Reads ``file_path``, counts the values of its last column (assumed to be
    the label column), computes each label's share of the total, appends a
    summary row for the dominant (most frequent) label, and writes the result
    to ``<analyze_dir>/analyze_<source_name>.csv``.

    Args:
        file_path: Path to the input CSV file.
        source_name: Identifier used to name the output file.
        analyze_dir: Directory where the analysis CSV is written (created
            if it does not exist).
    """
    # Read the CSV file
    df = pd.read_csv(file_path)

    # Assume the last column holds the labels
    last_column = df.columns[-1]

    # Count occurrences of each label
    label_counts = df[last_column].value_counts()

    # Compute each label's share of the total
    total_labels = label_counts.sum()
    label_weights = label_counts / total_labels

    # Identify the dominant (most frequent) label
    dominant_label = label_counts.idxmax()
    dominant_label_count = label_counts.max()

    # Collect per-label statistics into a DataFrame
    analyze_df = pd.DataFrame({
        'Label': label_counts.index,
        'Count': label_counts.values,
        'Weight': label_weights.values
    })

    # Append a summary row for the dominant label.
    # Bug fix: the previous version placed the label name in the 'Count'
    # column and the raw count in the 'Weight' column; values are now
    # aligned with their column meanings (Count = occurrences,
    # Weight = fraction of total).
    dominant_info = pd.DataFrame(
        [[f'Dominant Label: {dominant_label}',
          dominant_label_count,
          dominant_label_count / total_labels]],
        columns=['Label', 'Count', 'Weight'])
    analyze_df = pd.concat([analyze_df, dominant_info], ignore_index=True)

    # Create the output directory if it does not exist
    os.makedirs(analyze_dir, exist_ok=True)

    # Build the output file path
    analyze_file = os.path.join(analyze_dir, f'analyze_{source_name}.csv')

    # Save the analysis to CSV
    analyze_df.to_csv(analyze_file, index=False)

    print(f"\nAnalysis results saved to: {analyze_file}")


def process_files_in_directory(input_dir, analyze_dir):
    """Analyze every ``*_features.csv`` file found in ``input_dir``.

    For each matching file, derives a source name by stripping the
    ``_features.csv`` suffix from the filename and delegates to
    :func:`analyze_facial_attributes`, writing results under ``analyze_dir``.

    Args:
        input_dir: Directory scanned (non-recursively) for feature CSVs.
        analyze_dir: Directory where analysis CSVs are written.
    """
    # Collect all feature CSV files in the input directory
    csv_files = [f for f in os.listdir(input_dir) if f.endswith('_features.csv')]

    # Analyze each file in turn
    for file_name in csv_files:
        file_path = os.path.join(input_dir, file_name)
        print(f"Analyzing file: {file_path}")
        # Bug fix: derive the source name directly from the filename.
        # The previous os.path.relpath(..., start='./data/out_feature')
        # used a hard-coded base that did not match input_dir, so
        # source_name contained path components (e.g. '../..'), breaking
        # the output filename.
        source_name = file_name[:-len('_features.csv')]
        analyze_facial_attributes(file_path, source_name, analyze_dir)


def _main():
    """Script entry point: analyze all feature CSVs in the input directory."""
    feature_dir = '../data/out_feature'  # directory holding *_features.csv inputs
    output_dir = './data/analyze'  # directory receiving analysis CSVs
    process_files_in_directory(feature_dir, output_dir)


if __name__ == '__main__':
    _main()
