import numpy as np
import pandas as pd
import torch
from sklearn.preprocessing import StandardScaler
import v2.data_loader_explorer as data_loader_explorer
import v2.data_cleaner_handler as data_cleaner_handler


def build_user_features(train_clean, test_clean):
    """Build user-node feature vectors from interaction logs.

    Merges train and test interactions, derives basic, behavioral and
    interest features per user, then z-score normalizes everything into
    one fixed-order vector per user.

    Args:
        train_clean: cleaned training interactions. Expected columns:
            'uid', 'user_city', 'did', 'item_id', 'video_duration',
            'channel', 'creat_time' (plus optional 'finish' / 'like').
        test_clean: cleaned test interactions with the same schema.

    Returns:
        Tuple of (user_final_features, feature_dim, scaler):
          - user_final_features: dict mapping uid -> normalized 1-D
            numpy array of length feature_dim.
          - feature_dim: length of each feature vector (0 if no users).
          - scaler: fitted StandardScaler, or None when there are no
            users (the original raised NameError on empty input).
    """
    print("开始构建用户节点特征...")

    # Merge both splits so every user seen anywhere gets features.
    all_data = pd.concat([train_clean, test_clean], ignore_index=True)

    # 1. Basic per-user attributes.
    def encode_basic_user_features(df):
        """Static attributes per user: home city and most common device."""
        user_basic_features = {}

        # Single groupby pass (O(n)) instead of filtering the whole frame
        # once per uid (O(n^2)) as the original did.
        for uid, user_data in df.groupby('uid'):
            device_modes = user_data['did'].mode()
            user_basic_features[uid] = {
                'user_city': user_data['user_city'].iloc[0],
                # mode() can return an empty Series (e.g. all-NaN column).
                'most_common_device': device_modes.iloc[0] if len(device_modes) > 0 else -999,
            }

        return user_basic_features

    # 2. Behavioral statistics per user.
    def extract_behavioral_features(df):
        """Aggregate interaction statistics per user."""
        user_behavior_features = {}

        for uid, user_data in df.groupby('uid'):
            finish_rate = user_data['finish'].mean() if 'finish' in user_data.columns else 0
            like_rate = user_data['like'].mean() if 'like' in user_data.columns else 0

            std_duration = user_data['video_duration'].std()

            # Channel preference as a normalized share per channel id.
            channel_counts = user_data['channel'].value_counts().to_dict()
            total_channels = sum(channel_counts.values())
            channel_preferences = {f'channel_{k}': v / total_channels for k, v in channel_counts.items()}

            # Temporal activity span; units of 'creat_time' are not known
            # from this file — TODO confirm against the data source.
            first_ts = user_data['creat_time'].min()
            last_ts = user_data['creat_time'].max()

            user_behavior_features[uid] = {
                'total_interactions': len(user_data),
                'finish_rate': finish_rate,
                'like_rate': like_rate,
                'avg_duration': user_data['video_duration'].mean(),
                # std() is NaN for single-interaction users; fall back to 0.
                'std_duration': std_duration if not np.isnan(std_duration) else 0,
                **channel_preferences,
                'first_interaction': first_ts,
                'last_interaction': last_ts,
                'interaction_span': last_ts - first_ts,
            }

        return user_behavior_features

    # 3. Interest features from interacted items.
    def extract_interest_features(df, item_features):
        """Interest proxies per user.

        item_features is reserved for future content-based vectors and is
        currently unused (kept for interface compatibility).
        """
        user_interest_features = {}

        for uid, user_data in df.groupby('uid'):
            user_interest_features[uid] = {
                'num_unique_items': user_data['item_id'].nunique(),
                'avg_item_popularity': 0,  # placeholder: needs item popularity data
            }

        return user_interest_features

    # 4. Integration and normalization.
    def integrate_and_normalize_user_features(basic_features, behavioral_features, interest_features):
        """Concatenate all feature groups into a fixed-order vector per user
        and z-score normalize across users."""
        # A list (not a set, as the original used) guarantees the write-back
        # order below matches the order the vectors were collected in.
        all_users = list(basic_features.keys())
        user_final_features = {}
        all_feature_vectors = []

        for uid in all_users:
            basic = basic_features.get(uid, {})
            behavioral = behavioral_features.get(uid, {})
            interest = interest_features.get(uid, {})

            feature_vector = [
                basic.get('user_city', -999),
                basic.get('most_common_device', -999),
                behavioral.get('total_interactions', 0),
                behavioral.get('finish_rate', 0),
                behavioral.get('like_rate', 0),
                behavioral.get('avg_duration', 0),
                behavioral.get('std_duration', 0),
                behavioral.get('first_interaction', 0),
                behavioral.get('last_interaction', 0),
                behavioral.get('interaction_span', 0),
            ]

            # Channel shares; assumes channel ids are 0..3 — TODO confirm.
            for channel in range(4):
                feature_vector.append(behavioral.get(f'channel_{channel}', 0))

            feature_vector.extend([
                interest.get('num_unique_items', 0),
                interest.get('avg_item_popularity', 0),
            ])

            all_feature_vectors.append(feature_vector)

        # Fix: the original referenced `scaler` in the return statement even
        # when no vectors were collected, raising NameError on empty input.
        scaler = None
        if all_feature_vectors:
            scaler = StandardScaler()
            normalized_vectors = scaler.fit_transform(all_feature_vectors)

            for i, uid in enumerate(all_users):
                user_final_features[uid] = normalized_vectors[i]

        feature_dim = len(all_feature_vectors[0]) if all_feature_vectors else 0

        return user_final_features, feature_dim, scaler

    # Run the user-feature pipeline.
    print("1. 编码基础特征...")
    user_basic_features = encode_basic_user_features(all_data)

    print("2. 提取行为特征...")
    user_behavioral_features = extract_behavioral_features(all_data)

    print("3. 提取兴趣特征...")
    user_interest_features = extract_interest_features(all_data, {})  # real item_features TBD

    print("4. 整合和归一化特征...")
    user_final_features, user_feature_dim, user_scaler = integrate_and_normalize_user_features(
        user_basic_features, user_behavioral_features, user_interest_features
    )

    print(f"用户特征构建完成! 特征维度: {user_feature_dim}")
    print(f"覆盖用户数: {len(user_final_features)}")

    return user_final_features, user_feature_dim, user_scaler



def process_video_multimodal_features(train_clean, test_clean, face_attrs, audio_features, title_features,
                                      video_features):
    """Build multimodal feature vectors for every video (item) node.

    Concatenates, per item: visual (first-face attributes, 6 dims),
    audio (128 dims), text (title term frequency over a global vocab),
    video content (128 dims) and interaction metadata (7 dims).

    Args:
        train_clean / test_clean: interaction DataFrames with at least
            'item_id', 'author_id', 'item_city', 'video_duration'
            (plus optional 'music_id', 'finish', 'like').
        face_attrs: item_id -> {'face_attrs': [{'gender', 'beauty',
            'relative_position': [x_min, y_min, width, height]}, ...]}.
        audio_features: item_id -> {'audio_feature_128_dim': [128 floats]}.
        title_features: item_id -> {'title_features': {word: count}}.
        video_features: item_id -> {'video_feature_dim_128': [128 floats]}.

    Returns:
        (multimodal_features, feature_dimensions, vocab):
          - multimodal_features: item_id -> concatenated feature list.
          - feature_dimensions: per-modality and total vector lengths.
          - vocab: sorted title-word list backing the text dimensions.
    """
    print("开始处理视频多模态特征...")

    # Every item appearing in either split gets a feature vector.
    train_items = set(train_clean['item_id'].unique())
    test_items = set(test_clean['item_id'].unique())
    all_items = train_items | test_items

    # 1. Visual features (face attributes).
    def process_visual_features(face_attrs, all_items):
        """Gender, beauty score and bounding box of the first detected face."""
        visual_features = {}

        print("处理视觉特征...")
        for item_id in all_items:
            if item_id in face_attrs and len(face_attrs[item_id]['face_attrs']) > 0:
                face_data = face_attrs[item_id]['face_attrs'][0]  # first face only
                visual_features[item_id] = [
                    face_data['gender'],  # 0/1
                    face_data['beauty'],  # beauty score
                    *face_data['relative_position']  # [x_min, y_min, width, height]
                ]
            else:
                # No face detected: sentinel gender, zeroed score/box.
                visual_features[item_id] = [-1, 0.0, 0.0, 0.0, 0.0, 0.0]

        return visual_features

    # 2. Audio features.
    def process_audio_features(audio_features, all_items):
        """Per-item 128-dim audio vector, z-scored within each item."""
        audio_vectors = {}

        print("处理音频特征...")
        for item_id in all_items:
            if item_id in audio_features:
                audio_vec = np.array(audio_features[item_id]['audio_feature_128_dim'])
                # Per-vector standardization (skipped for constant vectors).
                if np.std(audio_vec) > 0:
                    audio_vec = (audio_vec - np.mean(audio_vec)) / np.std(audio_vec)
                audio_vectors[item_id] = audio_vec.tolist()
            else:
                # No audio data: zero vector.
                audio_vectors[item_id] = [0.0] * 128

        return audio_vectors

    # 3. Text features (title term frequency).
    def process_text_features(title_features, all_items):
        """Normalized term-frequency vectors over a global title vocabulary."""
        print("处理文本特征...")

        # Global vocabulary, sorted for a deterministic dimension order.
        vocab = set()
        for item_data in title_features.values():
            vocab.update(item_data['title_features'].keys())
        vocab = sorted(vocab)
        vocab_size = len(vocab)

        print(f"构建词汇表，大小: {vocab_size}")

        text_vectors = {}
        for item_id in all_items:
            if item_id in title_features:
                tf_dict = title_features[item_id]['title_features']
                tf_vector = [tf_dict.get(word, 0) for word in vocab]
                # Normalize counts to shares of the title's total.
                total = sum(tf_vector)
                if total > 0:
                    tf_vector = [count / total for count in tf_vector]
                text_vectors[item_id] = tf_vector
            else:
                # No title data: zero vector.
                text_vectors[item_id] = [0.0] * vocab_size

        return text_vectors, vocab

    # 4. Video content features.
    def process_video_content_features(video_features, all_items):
        """Per-item 128-dim video content vector, z-scored within each item."""
        video_vectors = {}

        print("处理视频内容特征...")
        for item_id in all_items:
            if item_id in video_features:
                video_vec = np.array(video_features[item_id]['video_feature_dim_128'])
                if np.std(video_vec) > 0:
                    video_vec = (video_vec - np.mean(video_vec)) / np.std(video_vec)
                video_vectors[item_id] = video_vec.tolist()
            else:
                # No video data: zero vector.
                video_vectors[item_id] = [0.0] * 128

        return video_vectors

    # 5. Interaction metadata per video.
    def process_video_metadata(train_clean, test_clean, all_items):
        """Static attributes plus engagement statistics per item."""
        print("处理视频元数据特征...")

        all_data = pd.concat([train_clean, test_clean], ignore_index=True)
        # One groupby pass (O(n)) instead of filtering per item (O(n^2)).
        groups = {item_id: group for item_id, group in all_data.groupby('item_id')}
        # Column-presence checks hoisted out of the loop.
        has_music = 'music_id' in all_data.columns
        has_finish = 'finish' in all_data.columns
        has_like = 'like' in all_data.columns

        metadata_features = {}
        for item_id in all_items:
            item_data = groups.get(item_id)
            if item_data is not None:
                metadata_features[item_id] = [
                    item_data['author_id'].iloc[0],
                    item_data['item_city'].iloc[0],
                    item_data['music_id'].iloc[0] if has_music else -999,
                    item_data['video_duration'].iloc[0],
                    len(item_data),  # interaction count as a popularity proxy
                    item_data['finish'].mean() if has_finish else 0,
                    item_data['like'].mean() if has_like else 0,
                ]
            else:
                # Item never observed in interactions: sentinel defaults.
                metadata_features[item_id] = [-999, -999, -999, 0, 0, 0, 0]

        return metadata_features

    # Run each modality.
    visual_features = process_visual_features(face_attrs, all_items)
    audio_vectors = process_audio_features(audio_features, all_items)
    text_vectors, vocab = process_text_features(title_features, all_items)
    video_vectors = process_video_content_features(video_features, all_items)
    metadata_features = process_video_metadata(train_clean, test_clean, all_items)

    # 6. Integrate all modalities.
    def integrate_multimodal_features(visual_features, audio_vectors, text_vectors, video_vectors, metadata_features,
                                      all_items):
        """Concatenate every modality into one flat vector per item."""
        print("整合多模态特征...")

        def modality_dim(feature_dict):
            # Every item shares the same vector length within a modality
            # by construction above, so any one entry gives the width.
            return len(next(iter(feature_dict.values()))) if feature_dict else 0

        multimodal_features = {}
        for item_id in all_items:
            multimodal_features[item_id] = (
                list(visual_features.get(item_id, []))
                + list(audio_vectors.get(item_id, []))
                + list(text_vectors.get(item_id, []))
                + list(video_vectors.get(item_id, []))
                + list(metadata_features.get(item_id, []))
            )

        # Fix: the original read loop-leaked variables (visual_feat, ...)
        # after the loop; derive each modality's width from its dict instead.
        feature_dimensions = {
            'visual': modality_dim(visual_features),
            'audio': modality_dim(audio_vectors),
            'text': modality_dim(text_vectors),
            'video_content': modality_dim(video_vectors),
            'metadata': modality_dim(metadata_features),
        }
        feature_dimensions['total'] = sum(feature_dimensions.values())

        return multimodal_features, feature_dimensions

    multimodal_features, feature_dimensions = integrate_multimodal_features(
        visual_features, audio_vectors, text_vectors, video_vectors, metadata_features, all_items
    )

    print(f"视频多模态特征处理完成!")
    print(f"特征维度统计: {feature_dimensions}")
    print(f"覆盖视频数: {len(multimodal_features)}")

    return multimodal_features, feature_dimensions, vocab


def enhance_and_finalize_features(user_features, video_features, train_clean, feature_dims):
    """Reduce, standardize and package node features for model consumption.

    Args:
        user_features: uid -> feature vector (list/array).
        video_features: item_id -> feature vector (list/array).
        train_clean: unused here; kept for interface compatibility.
        feature_dims: unused here; kept for interface compatibility.

    Returns:
        Dict with the standardized feature dicts, torch embedding layers
        plus id->index maps, a tensor feature cache, the fitted scalers
        (None when the corresponding feature dict was empty), and stats.
    """
    print("开始特征增强和最终处理...")

    # 1. Optional PCA reduction.
    def apply_dimensionality_reduction(features, target_dim=64):
        """PCA-reduce feature vectors to at most target_dim dimensions."""
        print(f"应用特征降维，目标维度: {target_dim}")

        feature_dict = features.copy()
        # Fix: np.array([]) has no second axis; the original crashed with
        # IndexError on an empty feature dict.
        if not feature_dict:
            print("无特征可降维，跳过")
            return feature_dict

        items = list(feature_dict.keys())
        feature_matrix = np.array([feature_dict[item] for item in items])

        # Fix: PCA components are capped by the sample count as well; the
        # original passed target_dim directly and crashed whenever there
        # were fewer rows than target_dim.
        n_components = min(target_dim, feature_matrix.shape[0])

        if feature_matrix.shape[1] > n_components:
            # Imported lazily so the no-reduction path needs no sklearn.
            from sklearn.decomposition import PCA

            pca = PCA(n_components=n_components)
            reduced_features = pca.fit_transform(feature_matrix)

            for i, item in enumerate(items):
                feature_dict[item] = reduced_features[i]

            print(f"降维完成: {feature_matrix.shape[1]} -> {n_components} 维度")
            print(f"解释方差比: {np.sum(pca.explained_variance_ratio_):.3f}")
        else:
            print(f"当前维度 {feature_matrix.shape[1]} 小于目标维度 {target_dim}，跳过降维")

        return feature_dict

    # 2. Embedding layers and id->index maps.
    def create_feature_embeddings(user_features, video_features, user_feature_dim, video_feature_dim):
        """Create torch embedding tables and id->index maps (index 0 = padding).

        NOTE(review): the tables are randomly initialized — the computed
        feature vectors are NOT copied into them here; confirm whether the
        downstream model is expected to load them.
        """
        print("创建特征嵌入层...")

        user_embedding_dim = min(64, user_feature_dim)
        user_embedding = torch.nn.Embedding(
            num_embeddings=len(user_features) + 1,  # +1 for padding index 0
            embedding_dim=user_embedding_dim
        )

        video_embedding_dim = min(64, video_feature_dim)
        video_embedding = torch.nn.Embedding(
            num_embeddings=len(video_features) + 1,  # +1 for padding index 0
            embedding_dim=video_embedding_dim
        )

        # Index 0 is reserved for padding, so real ids start at 1.
        user_id_to_idx = {uid: idx + 1 for idx, uid in enumerate(user_features)}
        video_id_to_idx = {vid: idx + 1 for idx, vid in enumerate(video_features)}

        print(f"用户嵌入: {len(user_features)} -> {user_embedding_dim}D")
        print(f"视频嵌入: {len(video_features)} -> {video_embedding_dim}D")

        return {
            'user_embedding': user_embedding,
            'video_embedding': video_embedding,
            'user_id_to_idx': user_id_to_idx,
            'video_id_to_idx': video_id_to_idx
        }

    # 3. Standardization across nodes.
    def standardize_features(features):
        """Z-score features across nodes; returns (dict, fitted scaler or None)."""
        print("标准化特征...")

        feature_dict = features.copy()
        # Fix: fitting a scaler on an empty matrix raises; skip instead.
        if not feature_dict:
            return feature_dict, None

        items = list(feature_dict.keys())
        feature_matrix = np.array([feature_dict[item] for item in items])

        scaler = StandardScaler()
        standardized_features = scaler.fit_transform(feature_matrix)

        for i, item in enumerate(items):
            feature_dict[item] = standardized_features[i]

        return feature_dict, scaler

    # 4. Tensor cache keyed by embedding index for fast lookup.
    def create_feature_cache(user_features, video_features, user_id_to_idx, video_id_to_idx):
        """Pre-convert feature vectors to FloatTensors keyed by embedding index."""
        print("创建特征缓存...")

        feature_cache = {
            'user_features': {},
            'video_features': {},
            'user_raw_features': user_features,
            'video_raw_features': video_features
        }

        for uid, features in user_features.items():
            idx = user_id_to_idx.get(uid, 0)
            feature_cache['user_features'][idx] = torch.FloatTensor(features)

        for vid, features in video_features.items():
            idx = video_id_to_idx.get(vid, 0)
            feature_cache['video_features'][idx] = torch.FloatTensor(features)

        return feature_cache

    # Run the enhancement pipeline.
    print("1. 应用特征降维...")
    user_features_reduced = apply_dimensionality_reduction(user_features, target_dim=32)
    video_features_reduced = apply_dimensionality_reduction(video_features, target_dim=128)

    print("2. 标准化特征...")
    user_features_std, user_scaler = standardize_features(user_features_reduced)
    video_features_std, video_scaler = standardize_features(video_features_reduced)

    print("3. 创建嵌入层...")
    user_feature_dim = len(next(iter(user_features_std.values()))) if user_features_std else 0
    video_feature_dim = len(next(iter(video_features_std.values()))) if video_features_std else 0

    embedding_layers = create_feature_embeddings(
        user_features_std, video_features_std, user_feature_dim, video_feature_dim
    )

    print("4. 创建特征缓存...")
    feature_cache = create_feature_cache(
        user_features_std, video_features_std,
        embedding_layers['user_id_to_idx'], embedding_layers['video_id_to_idx']
    )

    final_stats = {
        'num_users': len(user_features_std),
        'num_videos': len(video_features_std),
        'user_feature_dim': user_feature_dim,
        'video_feature_dim': video_feature_dim,
        'user_embedding_dim': embedding_layers['user_embedding'].embedding_dim,
        'video_embedding_dim': embedding_layers['video_embedding'].embedding_dim
    }

    print(f"\n特征工程完成!")
    print(f"最终统计: {final_stats}")

    return {
        'user_features': user_features_std,
        'video_features': video_features_std,
        'embedding_layers': embedding_layers,
        'feature_cache': feature_cache,
        'scalers': {
            'user': user_scaler,
            'video': video_scaler
        },
        'stats': final_stats
    }


def main_node_feature_engineering(train_clean, test_clean, face_attrs, audio_features, title_features,
                                  video_features, output_path='node_features.pkl'):
    """End-to-end node feature engineering pipeline.

    Builds user-node features, video multimodal features, enhances and
    packages them, and optionally pickles the result.

    Args:
        train_clean / test_clean: cleaned interaction DataFrames.
        face_attrs / audio_features / title_features / video_features:
            per-item modality dicts (see process_video_multimodal_features).
        output_path: path for the pickled feature bundle. Defaults to the
            original hard-coded 'node_features.pkl'; pass None to skip
            saving (new, backward-compatible parameter).

    Returns:
        The feature bundle dict from enhance_and_finalize_features.
    """
    print("=" * 60)
    print("开始节点特征工程")
    print("=" * 60)

    # 1. User node features.
    user_features, user_feature_dim, user_scaler = build_user_features(train_clean, test_clean)

    # 2. Video multimodal features.
    video_multimodal_features, feature_dims, vocab = process_video_multimodal_features(
        train_clean, test_clean, face_attrs, audio_features, title_features, video_features
    )

    # 3. Enhancement and final packaging.
    final_features = enhance_and_finalize_features(
        user_features, video_multimodal_features, train_clean, feature_dims
    )

    print("\n" + "=" * 60)
    print("节点特征工程完成!")
    print("=" * 60)

    # Persist the bundle (optional).
    if output_path is not None:
        import pickle
        with open(output_path, 'wb') as f:
            pickle.dump(final_features, f)
        print(f"特征已保存到 {output_path}")

    return final_features


# Script entry point: load raw data, explore it, clean it, then run the
# node-feature-engineering pipeline.
if __name__ == "__main__":
    # Raw artifacts produced by the step-1 loader.
    train_data, test_data, face_attrs, audio_features, title_features, video_features = \
        data_loader_explorer.load_and_explore_data("../resources/dataset/top10000/")

    data_loader_explorer.detailed_data_exploration(train_data, "训练集")
    # Fix: the test set was previously mislabeled "训练集" (training set).
    data_loader_explorer.detailed_data_exploration(test_data, "测试集")
    data_loader_explorer.generate_data_quality_report(train_data, test_data, face_attrs, audio_features, title_features, video_features)

    # NOTE(review): clean_data's return value is discarded while the raw
    # frames are passed downstream below — if it returns cleaned copies
    # rather than mutating in place, the cleaning has no effect. Confirm
    # its contract in v2.data_cleaner_handler.
    data_cleaner_handler.clean_data(train_data, test_data)

    final_features = main_node_feature_engineering(
        train_data, test_data, face_attrs, audio_features, title_features, video_features
    )