
import inspect
import os
import sys
import uuid
from pprint import pprint
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from transformers import AutoTokenizer
from Config import Paths, Config
from Utils import sep, LOGGER

# Module-level tokenizer, loaded from the copy previously saved under
# OUTPUT_DIR/tokenizer (used by analyze_text_length_distribution / main).
tokenizer = AutoTokenizer.from_pretrained(os.path.join(Paths.OUTPUT_DIR, "tokenizer"))


def generate_unique_id(n):
    """Return the first *n* characters of an uppercase UUID4 hex string (32 chars available)."""
    return uuid.uuid4().hex.upper()[:n]

def analyze_text_length_distribution(df, text_column='text', tokenizer=None,
                                     show_hist=True, bins=25,
                                     max_len_calc_method='max',
                                     special_tokens_buffer=3):
    """Tokenize every text in *df* and report the token-length distribution.

    Args:
        df: DataFrame holding the texts; NaN entries are treated as "".
        text_column: name of the column containing raw text.
        tokenizer: HuggingFace-style callable tokenizer; must not be None.
        show_hist: plot a histogram of the lengths when True.
        bins: number of histogram bins.
        max_len_calc_method: 'max' -> longest text + buffer;
            'per' or 'percentile' -> 90th percentile + (buffer - 1).
        special_tokens_buffer: extra slots reserved for special tokens
            ([CLS]/[SEP] etc.), since lengths are counted without them.

    Returns:
        dict with 'suggested_max_len' and a 'stats' sub-dict
        (min/max/mean/median and the 90th/95th/99th percentiles).

    Raises:
        ValueError: if tokenizer is None or max_len_calc_method is invalid.
    """
    # Validate inputs before touching any global state.
    if tokenizer is None:
        raise ValueError("tokenizer不能为None")

    lengths = []
    for text in tqdm(df[text_column].fillna("").values, total=len(df)):
        # Count tokens without special tokens; the buffer accounts for them.
        lengths.append(len(tokenizer(text, add_special_tokens=False)['input_ids']))

    if max_len_calc_method == 'max':
        suggested_max_len = max(lengths) + special_tokens_buffer
    elif max_len_calc_method in ('per', 'percentile'):
        # NOTE(review): buffer-1 kept from the original — presumably a
        # deliberately tighter margin for the percentile path; confirm.
        suggested_max_len = int(np.percentile(lengths, 90)) + (special_tokens_buffer - 1)
    else:
        # Fix: the original message advertised 'percentile' while only
        # 'per' was accepted; both spellings are now valid.
        raise ValueError("max_len_calc_method必须是'max'、'per'或'percentile'")

    LOGGER.info(f"建议的MAX_LEN: {suggested_max_len}")

    if show_hist:
        # Configure CJK-capable font / minus rendering only when we plot,
        # instead of mutating global rcParams unconditionally.
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False
        plt.hist(lengths, bins=bins)
        plt.title('文本长度分布')
        plt.xlabel('Token数量')
        plt.ylabel('频数')
        plt.show()

    return {
        'suggested_max_len': suggested_max_len,
        'stats': {
            'min': min(lengths),
            'max': max(lengths),
            'mean': np.mean(lengths),
            'median': np.median(lengths),
            'percentile_90': np.percentile(lengths, 90),
            'percentile_95': np.percentile(lengths, 95),
            'percentile_99': np.percentile(lengths, 99)
        }
    }

def analyze_df(df, target_col='generated'):
    """Print shape, dtypes, missing counts, class balance and head of *df*.

    The display label is the variable name the caller used for *df*,
    recovered by inspecting the caller's frame. Returns the stats dict.
    """
    # Find the first local in the calling frame bound to this exact object.
    caller = inspect.currentframe().f_back
    df_label = next(
        (name for name, value in caller.f_locals.items() if value is df),
        None,
    )

    stats = {
        'shape': df.shape,
        'dtypes': df.dtypes,
        'missing_values': df.isnull().sum(),
        'class_distribution': df[target_col].value_counts(normalize=True).to_dict(),
        'head': df.head(),
    }

    sep()
    print(f"{df_label} 的形状: {stats['shape']}")
    print("\n数据类型:\n", stats['dtypes'])
    print("\n缺失值统计:\n", stats['missing_values'])
    print(f"{df_label} 的{target_col}类别分布:\n{pd.Series(stats['class_distribution'])}")
    print("\n前5行数据:")
    print("\n", stats['head'])
    sep()

    return stats

def analyze_dataset(df):
    """Print basic stats for *df* (shape, dtypes, missing values, head).

    The caller's variable name for *df* is recovered via frame inspection
    and used as the display label. Returns the stats dict.
    """
    caller = inspect.currentframe().f_back
    df_name = next(
        (name for name, value in caller.f_locals.items() if value is df),
        None,
    )

    report = {
        'shape': df.shape,
        'dtypes': df.dtypes,
        'missing_values': df.isnull().sum(),
        'head': df.head(),
    }

    sep()
    print(f"{df_name} 形状: {report['shape']}")
    print("\n数据类型:\n", report['dtypes'])
    print("\n缺失值统计:\n", report['missing_values'])
    print("\n前5行数据:")
    print("\n", report['head'])
    sep()
    return report


def datapreprocess():
    """Normalize each raw dataset into (id, text, generated) and save cleaned CSVs.

    For every source: build a unified frame, drop rows with missing
    text/label, de-duplicate on text, reset the index, write the result
    under ./input/save/, and print summary stats via analyze_df.
    Convention: generated == 1 for machine text, 0 for human text.
    """
    # --- DAIGT external: 'source_text' is the AI side, 'text' the human side. ---
    external_df = pd.read_csv("input/dataset/daigt_external_dataset.csv", sep=',')

    external_df1 = pd.DataFrame({
        "id": external_df['id'],
        "text": external_df['source_text'].str.replace("\n", " "),
        "generated": 1
    })

    external_df2 = pd.DataFrame({
        "id": external_df['id'],
        "text": external_df['text'].str.replace("\n", " "),
        "generated": 0
    })

    external_df = pd.concat([external_df1, external_df2]).dropna(subset=['text', 'generated'], how='any').drop_duplicates(subset=['text']) # drop empty rows and de-duplicate on text
    external_df.reset_index(inplace=True, drop=True) # re-number rows from 0; drop=True discards the old index column
    external_df.to_csv("./input/save/f_daigt_external_dataset.csv", index=False)
    analyze_df(external_df)

    # --- DRCAT v2: already labeled via 'label'; fresh 9-char ids generated. ---
    drcat_df = pd.read_csv("./input/dataset/train_v2_drcat_02.csv", sep=',')
    drcat_df1 = pd.DataFrame({
        'id': [generate_unique_id(9) for _ in range(len(drcat_df))],
        'text': drcat_df['text'].str.replace('\n', '',regex=True),
        'generated': drcat_df['label']
    })

    drcat_df = drcat_df1.dropna(subset=['text', 'generated'], how='any').drop_duplicates(subset=['text']) # drop empty rows and de-duplicate on text
    drcat_df.reset_index(inplace=True, drop=True)
    drcat_df.to_csv("./input/save/f_train_v2_drcat_02.csv", index=False)
    analyze_df(drcat_df)

    # --- MAGE test set. ---
    MAGE_df = pd.read_csv("./input/dataset/yaful-MAGE-test.csv", sep=',')
    MAGE_df1 = pd.DataFrame({
        'id': [generate_unique_id(9) for _ in range(len(MAGE_df))],
        'text': MAGE_df['text'].str.replace('\n', '',regex=True),
        'generated': MAGE_df['label']
    })

    # MAGE appears to use the opposite label convention, so flip 0<->1 to
    # match generated==1 meaning machine text — TODO confirm against the
    # MAGE dataset documentation.
    MAGE_df1['generated'] = MAGE_df1['generated'].replace({1: 0, 0: 1})
    MAGE_df = MAGE_df1.dropna(subset=['text', 'generated'], how='any').drop_duplicates(subset=['text']) # drop empty rows and de-duplicate on text
    MAGE_df.reset_index(inplace=True, drop=True)
    MAGE_df.to_csv("./input/save/f_yaful-MAGE-test.csv", index=False)
    analyze_df(MAGE_df)

    # --- Ateeqq AI-and-Human: text comes from the 'abstract' column. ---
    AandHG_df = pd.read_csv("./input/dataset/Ateeqq-AI-and-Human-Generated-Text.csv", sep=',')
    AandHG_df1 = pd.DataFrame({
        'id': [generate_unique_id(9) for _ in range(len(AandHG_df))],
        'text': AandHG_df['abstract'].str.replace('\n', '',regex=True),
        'generated': AandHG_df['label']
    })

    AandHG_df = AandHG_df1.dropna(subset=['text', 'generated'], how='any').drop_duplicates(subset=['text'])  # drop empty rows and de-duplicate on text
    AandHG_df.reset_index(inplace=True, drop=True)
    AandHG_df.to_csv("./input/save/f_Ateeqq-AI-and-Human-Generated-Text.csv", index=False)
    analyze_df(AandHG_df)

    # --- HC3: answers are stored as list-like strings; strip the brackets. ---
    # NOTE(review): these .str.replace calls pass no regex= flag. Under
    # pandas < 2.0 the default was regex=True, where '[' is an invalid
    # pattern — this code presumably relies on the pandas >= 2.0 literal
    # default; confirm the pinned pandas version.
    HC3_df = pd.read_csv("./input/dataset/Hello-SimpleAI-HC3.csv", sep=',')
    HC3_df1 = pd.DataFrame({
        'id': HC3_df['id'],
        'text': HC3_df['human_answers'].str.replace('[', '').str.replace(']', '').str.replace('\n', ' '),
        'generated': 0
    })
    HC3_df2 = pd.DataFrame({
        'id': HC3_df['id'],
        'text': HC3_df['chatgpt_answers'].str.replace('[', '').str.replace(']', '').str.replace('\n', ' '),
        'generated': 1
    })
    HC3_df = pd.concat([HC3_df1, HC3_df2]).dropna(subset=['text', 'generated'],how='any').drop_duplicates(subset=['text'])  # drop empty rows and de-duplicate on text
    HC3_df.reset_index(inplace=True, drop=True)  # re-number rows from 0; drop=True discards the old index column
    analyze_df(HC3_df)
    HC3_df.to_csv("./input/save/f_Hello-SimpleAI-HC3.csv", index=False)






def split(df, percentage):
    """Return a stratified (train, test) split of *df* on the 'generated' column.

    *percentage* is the test fraction; the seed comes from Config.SEED so
    splits are reproducible.
    """
    parts = train_test_split(
        df,
        test_size=percentage,
        stratify=df['generated'],
        random_state=Config.SEED,
    )
    train_part, test_part = parts
    return train_part, test_part

def sample(df, num):
    """Return a class-balanced sample: *num* rows each of generated==0 and ==1.

    Sampling is seeded with Config.SEED for reproducibility; class 0 rows
    come first in the returned frame, then class 1.
    """
    per_class = [
        df[df['generated'] == label].sample(n=num, random_state=Config.SEED)
        for label in (0, 1)
    ]
    return pd.concat(per_class)

def check_id(df):
    """Ensure every value in df['id'] is unique.

    Raises:
        AssertionError: if any id is duplicated. Raised explicitly rather
        than via the `assert` statement so the check survives `python -O`
        (which strips asserts); the original also used a pointless f-string
        with no placeholders for the message.
    """
    if df['id'].nunique() != len(df):
        raise AssertionError("ID重复！")

def get_all_data():
    """Build the combined training pool from the saved per-source CSVs.

    Draws a class-balanced sample from each source and re-saves it, then
    concatenates the drcat and MAGE samples, shuffles, assigns fresh
    10-char unique ids, verifies uniqueness, and writes all_data.csv.

    NOTE(review): the AandHG sample is created and saved but never merged
    into all_df — this mirrors the original behavior; confirm whether it
    was meant to be included.
    """
    drcat_df = pd.read_csv("./input/save/f_train_v2_drcat_02.csv", sep=',')
    sample_drcat_df = sample(drcat_df, 2500)
    sample_drcat_df.reset_index(inplace=True, drop=True)
    sample_drcat_df.to_csv("./input/save/s2_f_train_v2_drcat_02.csv", index=False)
    analyze_df(sample_drcat_df)

    AandHG_df = pd.read_csv("./input/save/f_Ateeqq-AI-and-Human-Generated-Text.csv", sep=',')
    AandHG_df = AandHG_df.dropna(subset=['text', 'generated'], how='any')
    sample_AandHG_df = sample(AandHG_df, 5000).dropna(subset=['text', 'generated'],
                                                     how='any')
    sample_AandHG_df.reset_index(inplace=True, drop=True)
    sample_AandHG_df.to_csv("./input/save/s_f_Ateeqq-AI-and-Human-Generated-Text.csv", index=False)
    analyze_df(sample_AandHG_df)

    MAGE_df = pd.read_csv("./input/save/f_yaful-MAGE-test.csv", sep=',')
    sample_MAGE_df = sample(MAGE_df, 2500)
    sample_MAGE_df.reset_index(inplace=True, drop=True)
    sample_MAGE_df.to_csv("./input/save/s2_f_yaful-MAGE-test.csv", index=False)
    analyze_df(sample_MAGE_df)

    all_df = pd.concat([sample_drcat_df, sample_MAGE_df], ignore_index=True)
    # BUG FIX: the original called
    #   all_df.sample(frac=1, ...).reset_index(drop=True, inplace=True)
    # which shuffles a temporary copy and resets *that copy* in place,
    # leaving all_df itself unshuffled. Rebind so the shuffle sticks.
    all_df = all_df.sample(frac=1, random_state=Config.SEED).reset_index(drop=True)
    all_df['id'] = [generate_unique_id(10) for _ in range(len(all_df))]
    check_id(all_df)
    analyze_df(all_df)
    all_df.to_csv("./input/save/all_data.csv", index=False)

def get_train_test(df, percentage):
    """Split *df* into train/test, persist both CSVs, and verify id disjointness.

    Note: the locals are named train_df/test_df on purpose — analyze_df
    recovers and prints the caller's variable name via frame inspection.
    """
    train_df, test_df = split(df, percentage)
    train_df.reset_index(drop=True, inplace=True)
    test_df.reset_index(drop=True, inplace=True)
    analyze_df(train_df)
    analyze_df(test_df)
    train_df.to_csv("./input/save/train.csv", index=False)
    test_df.to_csv("./input/save/test.csv", index=False)
    # Re-read what was actually written to disk and confirm no id leaks
    # between the two splits.
    train_ids = set(pd.read_csv("./input/save/train.csv")['id'])
    test_ids = set(pd.read_csv("./input/save/test.csv")['id'])
    assert len(train_ids & test_ids) == 0  # the intersection must be empty

def main():
    """Entry point: report the token-length distribution of the combined dataset."""
    df = pd.read_csv("./input/save/all_data.csv")
    report = analyze_text_length_distribution(df, tokenizer=tokenizer,
                                              max_len_calc_method='per')
    print(report)
    sys.exit()

# Run the analysis only when executed as a script, not on import.
if __name__ == "__main__":
    main()