import json
import os
import random

import matplotlib.pyplot as plt
import numpy as np

from bart.config import *

def count_text_and_summary_len(file_path=FILE_PATH):
    """Plot length histograms for article bodies ('content') and titles ('title').

    Reads a JSON list of records from *file_path*, collects the character
    length of each record's 'content' and 'title' fields, shows one
    histogram per field, and prints the maximum length of each.

    Args:
        file_path: Path to the JSON dataset. Defaults to ``FILE_PATH``
            from ``bart.config`` (the original hard-coded value).
    """
    text_lengths = []
    summary_lengths = []

    # Load the JSON file; assumes it holds a list of dicts — TODO confirm schema.
    with open(file_path, 'r', encoding='UTF-8') as f:
        data = json.load(f)
        print(len(data))
        for item in data:
            # Missing fields count as length 0 instead of raising KeyError.
            text_lengths.append(len(item.get('content', '')))
            summary_lengths.append(len(item.get('title', '')))

    # Body lengths: 250-char bins up to 5000, ticks every 500.
    _plot_length_hist(text_lengths, np.arange(0, 5001, 250),
                      np.arange(0, 5001, 500), 'blue',
                      'Length of Text (Content)', 'Distribution of Text Lengths')

    # Title lengths: 8-char bins up to 128, ticks every 8.
    _plot_length_hist(summary_lengths, np.arange(0, 129, 8),
                      np.arange(0, 129, 8), 'green',
                      'Length of Summary (Title)', 'Distribution of Summary Lengths')

    # Report maxima; skip on an empty dataset (max([]) would raise ValueError).
    if text_lengths:
        print(f"Maximum Text Length: {max(text_lengths)}")
        print(f"Maximum Summary Length: {max(summary_lengths)}")


def _plot_length_hist(lengths, bins, ticks, color, xlabel, title):
    """Show one length histogram with dashed horizontal grid lines."""
    plt.figure(figsize=(12, 6))
    plt.hist(lengths, bins=bins, color=color, alpha=0.7)
    plt.xticks(ticks)
    plt.xlabel(xlabel)
    plt.ylabel('Frequency')
    plt.title(title)
    plt.grid(axis='y', linestyle='--', alpha=0.7)
    plt.show()

def split_data(file_path, output_dir, train_ratio=0.9, test_ratio=0.05, val_ratio=0.05):
    """Shuffle a JSON dataset and split it into train/test/validation files.

    Writes ``train.json``, ``test.json`` and ``val.json`` into *output_dir*
    (UTF-8, pretty-printed, non-ASCII characters kept as-is).

    Args:
        file_path: Path to the input JSON file holding a list of records.
        output_dir: Directory the three split files are written into.
        train_ratio: Fraction of records for training.
        test_ratio: Fraction of records for testing.
        val_ratio: Fraction of records for validation; the ratios must sum to 1.

    Raises:
        ValueError: If the three ratios do not sum to 1.
    """
    # Read the full dataset.
    with open(file_path, 'r', encoding='UTF-8') as f:
        data = json.load(f)

    # Validate with a real exception: `assert` is stripped under `python -O`.
    if abs(train_ratio + test_ratio + val_ratio - 1.0) >= 1e-6:
        raise ValueError("Ratios must sum to 1.")

    # Shuffle in place so the split is random w.r.t. the original order.
    random.shuffle(data)

    # Compute slice boundaries; val gets the remainder, absorbing any
    # rounding loss from the two int() truncations.
    total = len(data)
    print(f"total：{total}")
    train_end = int(total * train_ratio)
    test_end = train_end + int(total * test_ratio)

    splits = {
        'train.json': data[:train_end],
        'test.json': data[train_end:test_end],
        'val.json': data[test_end:],
    }

    # os.path.join works with or without a trailing separator on output_dir;
    # the original f"{output_dir}train.json" silently produced paths like
    # "outtrain.json" when the separator was missing.
    for filename, subset in splits.items():
        with open(os.path.join(output_dir, filename), 'w', encoding='UTF-8') as f:
            json.dump(subset, f, ensure_ascii=False, indent=4)

    print(f"Data split completed! Saved to {output_dir}")
    print(f"Train: {len(splits['train.json'])}, Test: {len(splits['test.json'])}, Validation: {len(splits['val.json'])}")

def main():
    """Analyze text/summary lengths, then split the dataset into files."""
    count_text_and_summary_len()
    split_data(FILE_PATH, OUTPUT_DIR)


if __name__ == '__main__':
    main()