import json
import os

import pandas as pd

from v2.dataset_metadata import *


def load_and_explore_data(data_dir='../resources/dataset/top1000000/'):
    """Load the train/test interaction tables and the four multimodal
    feature files, print a brief overview, and return everything.

    Args:
        data_dir: Directory containing the dataset files listed in the
            project-level ``DATA_FILES`` sequence (train, test, face,
            audio, title, video — in that order).

    Returns:
        Tuple of ``(train_df, test_df, face_attrs, audio_features,
        title_features, video_features)`` where the last four are dicts
        keyed by ``item_id``.
    """

    def _load_jsonl(filename):
        """Read one JSON-lines file into a dict keyed by ``item_id``."""
        features = {}
        with open(os.path.join(data_dir, filename), 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    # Tolerate blank/trailing lines in the feature files.
                    continue
                item = json.loads(line)
                features[item['item_id']] = item
        return features

    def _load_interactions(filename):
        """Read one tab-separated interaction table into a DataFrame."""
        return pd.read_csv(
            os.path.join(data_dir, filename),
            sep='\t',
            names=TRAIN_TEST_COLUMNS
        )

    # 1. Load train and test interaction tables.
    print("正在加载训练集和测试集...")
    train_df = _load_interactions(DATA_FILES[0])
    test_df = _load_interactions(DATA_FILES[1])

    # 2. Load the four multimodal feature files (JSON-lines, one record
    # per video). DATA_FILES order: face, audio, title, video.
    print("正在加载多模态特征文件...")
    face_attrs = _load_jsonl(DATA_FILES[2])
    audio_features = _load_jsonl(DATA_FILES[3])
    title_features = _load_jsonl(DATA_FILES[4])
    video_features = _load_jsonl(DATA_FILES[5])

    # 3. Data overview.
    print("\n=== 数据概览 ===")
    print(f"训练集形状: {train_df.shape}")
    print(f"测试集形状: {test_df.shape}")
    print(f"人脸特征数量: {len(face_attrs)}")
    print(f"音频特征数量: {len(audio_features)}")
    print(f"标题特征数量: {len(title_features)}")
    print(f"视频特征数量: {len(video_features)}")

    return train_df, test_df, face_attrs, audio_features, title_features, video_features

def detailed_data_exploration(df, dataset_name):
    """Print a detailed exploration of *df* and return it unchanged.

    Args:
        df: DataFrame to explore (train or test interaction table).
        dataset_name: Human-readable name used in the report header.

    Returns:
        The same ``df`` object, unmodified (for chaining).
    """
    print(f"\n=== {dataset_name} 详细探索 ===")

    # Basic information.
    print(f"数据集形状: {df.shape}")
    print(f"列名: {df.columns.tolist()}")

    # Dtypes and missing values. DataFrame.info() writes to stdout and
    # returns None, so it must not be wrapped in print() (which would
    # emit a spurious "None" line).
    print("\n数据类型和缺失值统计:")
    df.info()

    # Descriptive statistics of the numeric columns.
    print("\n数值列描述性统计:")
    print(df.describe())

    # Value distributions for the expected categorical columns, if present.
    print("\n分类列唯一值统计:")
    categorical_cols = ['user_city', 'channel', 'finish', 'like']
    for col in categorical_cols:
        if col in df.columns:
            print(f"{col}: {df[col].value_counts()}")

    return df

def generate_data_quality_report(train_clean, test_clean, face_attrs, audio_features, title_features, video_features):
    """Print a data-quality report and return its summary statistics.

    Args:
        train_clean: Cleaned training DataFrame with at least ``uid`` and
            ``item_id`` columns (``finish``/``like`` optional).
        test_clean: Cleaned test DataFrame with an ``item_id`` column.
        face_attrs, audio_features, title_features, video_features:
            Dicts keyed by ``item_id`` holding per-video features.

    Returns:
        Dict with keys ``train_records``, ``test_records``,
        ``unique_users``, ``unique_items`` and ``sparsity``
        (``sparsity`` is 0 when the training set is empty).
    """

    print("\n" + "=" * 50)
    print("数据质量报告")
    print("=" * 50)

    # 1. Basic counts.
    print("\n1. 数据集基本统计:")
    print(f"训练集记录数: {len(train_clean)}")
    print(f"测试集记录数: {len(test_clean)}")
    print(f"唯一用户数: {train_clean['uid'].nunique()}")
    print(f"唯一视频数: {train_clean['item_id'].nunique()}")

    # 2. Label distribution (training set only, if labels are present).
    if 'finish' in train_clean.columns and 'like' in train_clean.columns:
        print("\n2. 训练集标签分布:")
        print(f"完成观看比例: {train_clean['finish'].mean():.2%}")
        print(f"点赞比例: {train_clean['like'].mean():.2%}")

        # Fraction of rows where both labels are positive (labels are 0/1).
        both_positive = (train_clean['finish'].astype(bool) & train_clean['like'].astype(bool)).mean()
        print(f"同时完成观看和点赞比例: {both_positive:.2%}")

    # 3. Multimodal feature coverage over all items seen in either split.
    train_items = set(train_clean['item_id'].unique())
    test_items = set(test_clean['item_id'].unique())
    all_items = train_items | test_items

    print("\n3. 多模态特征覆盖度:")
    print(f"总视频数: {len(all_items)}")
    modalities = [
        ("人脸", face_attrs),
        ("音频", audio_features),
        ("标题", title_features),
        ("视频", video_features),
    ]
    for label, feats in modalities:
        # Compute the intersection once; guard against an empty item set.
        covered = len(all_items & feats.keys())
        pct = covered / len(all_items) * 100 if all_items else 0.0
        print(f"{label}特征覆盖: {covered} ({pct:.1f}%)")

    # 4. Interaction-matrix sparsity. Default to 0 so the return below
    # never references an unbound name when the training set is empty.
    sparsity = 0
    if len(train_clean) > 0:
        denominator = train_clean['uid'].nunique() * train_clean['item_id'].nunique()
        if denominator:
            sparsity = 1 - len(train_clean) / denominator
        print(f"\n4. 数据稀疏度: {sparsity:.4f}")

    # 5. Per-user interaction statistics.
    if len(train_clean) > 0:
        user_stats = train_clean.groupby('uid').size().describe()
        print("\n5. 用户行为统计:")
        print(f"平均每个用户交互视频数: {user_stats['mean']:.2f}")
        print(f"最少交互数: {user_stats['min']}")
        print(f"最多交互数: {user_stats['max']}")

    return {
        'train_records': len(train_clean),
        'test_records': len(test_clean),
        'unique_users': train_clean['uid'].nunique(),
        'unique_items': train_clean['item_id'].nunique(),
        'sparsity': sparsity
    }
