import pandas as pd
import os
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict
from typing import Dict, Tuple, DefaultDict, List, Any
from matplotlib.figure import Figure
import pickle
from collections import defaultdict

# Configure matplotlib to render Chinese (CJK) text
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei ships CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign renderable with CJK fonts

# Type alias definitions for the aggregation dictionaries below
UserId = str
ProcessName = str
DomainName = str
Count = int
UserProcessDict = DefaultDict[UserId, Dict[ProcessName, Count]]
UserDomainDict = DefaultDict[UserId, Dict[DomainName, Count]]
UserSessionDict = Dict[UserId, Count]


# ---------- Cache paths ----------
# Aggregation results are pickled here so repeated runs skip the raw-log scan.
CACHE_DIR = Path("cache")
CACHE_DIR.mkdir(exist_ok=True)  # idempotent: creates ./cache on first run only
PROCESS_CACHE = CACHE_DIR / "process_cache.pkl"
DOMAIN_CACHE = CACHE_DIR / "domain_cache.pkl"
SESSION_CACHE = CACHE_DIR / "session_cache.pkl"

def load_demographic_data(csv_path: str = "data/demographic.csv",
                          reference_year: int = 2012) -> pd.DataFrame:
    """Load the demographic CSV and derive age columns.

    Parameters:
        csv_path: CSV file containing at least a BIRTHDAY (birth year) column.
        reference_year: year ages are computed against (the data set dates
            from 2012, hence the default).

    Returns:
        The demographic DataFrame with two derived columns: '年龄' (age in
        years) and 'age_group' (categorical age bucket).
    """
    demo_df = pd.read_csv(csv_path)
    demo_df['年龄'] = reference_year - demo_df['BIRTHDAY'].astype(int)

    # right=False makes bins left-inclusive [lo, hi), so age 18 lands in
    # '18-24' (not '<18'), 25 in '25-34', etc., matching the label text;
    # it also keeps age 0, which the default right-inclusive bins dropped.
    age_bins = [0, 18, 25, 35, 45, 55, 100]
    age_labels = ['<18', '18-24', '25-34', '35-44', '45-54', '55+']
    demo_df['age_group'] = pd.cut(demo_df['年龄'], bins=age_bins,
                                  labels=age_labels, right=False)

    return demo_df

# ---------- 缓存处理模块 ----------
def load_cache(cache_path: Path) -> Any:
    """Deserialize and return the pickled object at *cache_path*.

    Returns None when no cache file exists at that path.
    """
    if not cache_path.exists():
        return None
    with cache_path.open("rb") as fh:
        return pickle.load(fh)

def save_cache(data: Any, cache_path: Path) -> None:
    """Pickle *data* to *cache_path*, overwriting any existing file."""
    cache_path.write_bytes(pickle.dumps(data))

def nested_dict() -> dict:
    """Picklable factory for the inner per-user count dicts.

    Defined as a named module-level function (rather than a lambda or a
    local) so that defaultdict(nested_dict) instances can be pickled for
    the cache files.
    """
    counts = defaultdict(int)
    return counts


def process_behavior_directory(behavior_dir: Path):
    """Scan *behavior_dir* recursively for behavior logs and aggregate counts.

    Each ``*.txt`` file represents one user session; the user id is the part
    of the file name before the first underscore.  After a two-line header,
    each line holds ``key<=>value`` items joined by ``[=]``; key ``'P'`` is a
    process name and ``'U'`` a visited URL.

    Returns:
        A 3-tuple ``(user_process, user_domains, user_sessions)`` mapping
        user id to process counts, domain counts, and session count.
        Results are pickled to the module-level cache paths and reloaded on
        subsequent runs; delete the cache files to force a rescan.
    """
    # Fast path: all three caches present -> reuse them.
    if all(p.exists() for p in (PROCESS_CACHE, DOMAIN_CACHE, SESSION_CACHE)):
        print("加载缓存数据...")
        return (
            load_cache(PROCESS_CACHE),
            load_cache(DOMAIN_CACHE),
            load_cache(SESSION_CACHE),
        )

    # nested_dict is a named module-level factory, keeping these picklable.
    user_process = defaultdict(nested_dict)
    user_domains = defaultdict(nested_dict)
    user_sessions: UserSessionDict = defaultdict(int)

    for file_path in behavior_dir.rglob("*.txt"):
        user_id = file_path.name.split("_")[0]
        # One file == one session (counted even if parsing fails below).
        user_sessions[user_id] += 1

        try:
            with open(file_path, "r", encoding="utf-8") as f:
                # Skip the two header lines lazily instead of readlines()[2:],
                # which materialized the whole file in memory.
                next(f, None)
                next(f, None)
                for line in f:
                    fields = {}
                    for item in line.strip().split("[=]"):
                        # partition splits at the first '<=>' only, so values
                        # containing '<=>' no longer raise and abort the rest
                        # of the file; items without a separator are skipped.
                        key, sep, value = item.partition("<=>")
                        if not sep:
                            continue
                        fields[key.strip()] = value.strip()

                    if 'P' in fields:
                        user_process[user_id][fields['P'].lower()] += 1

                    if 'U' in fields:
                        domain = extract_domain(fields['U'])
                        if domain:
                            user_domains[user_id][domain] += 1
        except Exception as e:
            # Best-effort: report and continue with the remaining files.
            print(f"处理文件 {file_path} 出错: {str(e)}")

    save_cache(user_process, PROCESS_CACHE)
    save_cache(user_domains, DOMAIN_CACHE)
    save_cache(user_sessions, SESSION_CACHE)

    return user_process, user_domains, user_sessions

def extract_domain(url: str) -> str:
    """Reduce a raw URL to a bare second-level domain.

    Heuristic: drop the query string, fragment, scheme, path and port, then
    keep the last two dot-separated labels (``www.example.com`` ->
    ``example.com``).  Note this truncates multi-part public suffixes
    (``bbc.co.uk`` -> ``co.uk``).  Returns '' for empty input.
    """
    if not url:
        return ""

    # Strip query and fragment first, then any scheme prefix.
    clean_url = url.split("?")[0].split("#")[0].split("://")[-1]
    # Host is the text before the first '/', minus any ':port' suffix
    # (previously the port leaked into the result, e.g. 'com:8080').
    host = clean_url.split("/")[0].split(":")[0]
    parts = host.split(".")
    # Return the host (not clean_url, which still carried the path) when
    # there is no dot to split on, e.g. 'localhost/foo' -> 'localhost'.
    return ".".join(parts[-2:]) if len(parts) > 1 else host

def create_process_dataframe(user_process) -> pd.DataFrame:
    """Flatten {user: {process: count}} into a long-format DataFrame.

    Columns: USERID, process, count, usage_ratio (count / that user's total).
    """
    rows = []
    for uid, proc_counts in user_process.items():
        total = sum(proc_counts.values())
        rows.extend(
            {"USERID": uid, "process": name, "count": n, "usage_ratio": n / total}
            for name, n in proc_counts.items()
        )
    return pd.DataFrame(rows)

def create_domain_dataframe(user_domains) -> pd.DataFrame:
    """Flatten {user: {domain: count}} into a long-format DataFrame.

    Columns: USERID, domain, count, visit_ratio (count / that user's total).
    """
    rows = []
    for uid, domain_counts in user_domains.items():
        total = sum(domain_counts.values())
        rows.extend(
            {"USERID": uid, "domain": dom, "count": n, "visit_ratio": n / total}
            for dom, n in domain_counts.items()
        )
    return pd.DataFrame(rows)

def plot_demographic_features(demo_df: pd.DataFrame) -> Figure:
    """Render a 3x2 grid of demographic distribution charts.

    Expects *demo_df* to carry the columns GENDER, age_group, EDU, INCOME,
    JOB and PROVINCE (see load_demographic_data).

    Returns:
        The matplotlib Figure so callers can save or display it.
        (Annotation corrected: the function always returned fig.)
    """
    fig, axs = plt.subplots(3, 2, figsize=(18, 20))

    # Gender distribution (pie chart)
    gender_counts = demo_df['GENDER'].value_counts()
    axs[0, 0].pie(gender_counts, labels=gender_counts.index, autopct='%1.1f%%')
    axs[0, 0].set_title("性别分布")

    # Age distribution in a fixed bucket order; missing buckets plot as 0
    age_order = ['<18', '18-24', '25-34', '35-44', '45-54', '55+']
    age_counts = demo_df.groupby('age_group', observed=False).size().reindex(age_order).fillna(0)
    age_counts.plot(kind='bar', ax=axs[0, 1], color='skyblue')
    axs[0, 1].set_title("年龄分布")
    axs[0, 1].set_xlabel("")

    # Education level, ordered from lowest to highest attainment
    edu_order = ['初中', '高中/中专/技校', '大学专科', '大学本科', '硕士及以上']
    edu_counts = demo_df['EDU'].value_counts().reindex(edu_order).dropna()
    edu_counts.plot(kind='barh', ax=axs[1, 0], color='salmon')
    axs[1, 0].set_title("教育程度分布")

    # Income distribution, ordered by bracket
    income_order = [
        '无收入', '1000元以下', '1001～1500元', '1501～2000元',
        '2001～2500元', '2501～3000元', '3001～4000元',
        '4001～5000元', '5001～6000元', '6001～8000元',
        '8001～10000元', '10000元以上'
    ]
    income_counts = demo_df['INCOME'].value_counts().reindex(income_order).dropna()
    income_counts.plot(kind='bar', ax=axs[1, 1], color='lightgreen')
    axs[1, 1].set_title("收入分布")
    axs[1, 1].tick_params(axis='x', rotation=45)

    # Top-10 occupations
    top_jobs = demo_df['JOB'].value_counts().head(10)
    top_jobs.plot(kind='barh', ax=axs[2, 0], color='gold')
    axs[2, 0].set_title("职业分布 Top10")

    # Top-10 provinces
    top_provinces = demo_df['PROVINCE'].value_counts().head(10)
    top_provinces.plot(kind='barh', ax=axs[2, 1], color='violet')
    axs[2, 1].set_title("省份分布 Top10")

    plt.tight_layout()
    return fig

def plot_behavior_features(process_df: pd.DataFrame, domain_df: pd.DataFrame, demo_df: pd.DataFrame) -> Figure:
    """Behavior-feature visualization: a 2x2 grid relating process/domain
    usage to gender, age group and income.

    process_df / domain_df are the long-format frames produced by
    create_process_dataframe / create_domain_dataframe; demo_df must provide
    USERID, GENDER, age_group and INCOME columns.  Returns the Figure.
    """
    # Inner join by default: users absent from demo_df are dropped here.
    merged_df = pd.merge(
        process_df,
        demo_df[['USERID', 'GENDER', 'age_group', 'INCOME']],
        on='USERID'
    )
    
    fig, axs = plt.subplots(2, 2, figsize=(18, 14))
    
    # Top-10 most used processes overall
    top_processes = process_df.groupby('process')['count'].sum().nlargest(10)
    top_processes.plot(kind='barh', ax=axs[0,0], color='teal')
    axs[0,0].set_title("最常用进程 Top10")
    
    # Browser usage split by gender; the regex matches common browser names
    browser_df = merged_df[merged_df['process'].str.contains('chrome|firefox|iexplore|edge', case=False)]
    sns.barplot(
        x='process', 
        y='count', 
        hue='GENDER',
        data=browser_df,
        estimator=sum,  # bar height = total count, not seaborn's default mean
        ax=axs[0,1]
    )
    axs[0,1].set_title("不同性别浏览器使用对比")
    axs[0,1].tick_params(axis='x', rotation=45)
    
    # Single most-used application per age group (idxmax picks one row each)
    age_process = merged_df.groupby(['age_group', 'process'],observed = False)['count'].sum().reset_index()
    top_age_process = age_process.loc[age_process.groupby('age_group')['count'].idxmax()]
    sns.barplot(
        x='age_group', 
        y='count', 
        hue='process',
        data=top_age_process,
        ax=axs[1,0],
        palette='Set2'
    )
    axs[1,0].set_title("各年龄段最常用软件")
    
    # Top-3 visited domains within each income bracket, stacked by domain
    income_domain = pd.merge(
        domain_df,
        demo_df[['USERID', 'INCOME']],
        on='USERID'
    )
    top_domains = income_domain.groupby(['INCOME', 'domain'])['count'].sum().groupby('INCOME').nlargest(3)
    top_domains.unstack().plot(kind='bar', stacked=True, ax=axs[1,1], colormap='tab20')
    axs[1,1].set_title("不同收入群体的网站访问偏好")
    axs[1,1].tick_params(axis='x', rotation=45)
    axs[1,1].legend(bbox_to_anchor=(1.05, 1), loc='upper left')
    
    plt.tight_layout()
    return fig

def _first_mode(series: pd.Series, default: str = '未知') -> str:
    """Most frequent value of *series* (ties -> smallest), or *default*.

    Unlike ``series.mode()[0]`` guarded only by ``series.empty``, this also
    handles the all-NaN case, where mode() returns an empty Series and
    indexing [0] would raise IndexError.
    """
    modes = series.mode()
    return modes.iloc[0] if not modes.empty else default


def generate_user_profiles(demo_df: pd.DataFrame, process_df: pd.DataFrame, domain_df: pd.DataFrame) -> pd.DataFrame:
    """Build aggregate user profiles keyed by (gender, age group, education).

    First computes each user's most frequent process and domain, joins them
    onto the demographics, then aggregates per demographic group: modal job,
    income, application, website and province, plus a user count.

    Returns:
        A DataFrame with Chinese-named profile columns, sorted by group
        size ('用户数量') descending; empty groups are dropped.
    """
    process_mode = process_df.groupby('USERID')['process'].agg(_first_mode).rename('process')
    domain_mode = domain_df.groupby('USERID')['domain'].agg(_first_mode).rename('domain')

    # Inner joins: users lacking process or domain data are excluded.
    merged_data = pd.merge(
        demo_df,
        process_mode,
        on='USERID'
    ).merge(
        domain_mode,
        on='USERID'
    )

    profile_groups = merged_data.groupby(['GENDER', 'age_group', 'EDU'], observed=True)

    profiles = profile_groups.agg(
        职业=('JOB', _first_mode),
        收入=('INCOME', _first_mode),
        主要应用=('process', _first_mode),
        主要网站=('domain', _first_mode),
        主要省份=('PROVINCE', _first_mode),
        用户数量=('USERID', 'size')
    ).reset_index()

    profiles = profiles.rename(columns={
        'GENDER': '性别',
        'age_group': '年龄组',
        'EDU': '教育程度'
    })

    # Defensive: drop empty groups (can appear with categorical group keys).
    profiles = profiles[profiles['用户数量'] > 0]

    return profiles.sort_values('用户数量', ascending=False)

def analyze():
    """Run the full pipeline end to end.

    Loads demographics, aggregates the behavior logs under data/processed,
    renders both figure grids and builds the profile table.

    Returns:
        (user_profiles, demographic_figure, behavior_figure)
    """
    demographics = load_demographic_data()

    processes, domains, _sessions = process_behavior_directory(Path("data/processed"))
    process_df = create_process_dataframe(processes)
    domain_df = create_domain_dataframe(domains)

    demo_fig = plot_demographic_features(demographics)
    behavior_fig = plot_behavior_features(process_df, domain_df, demographics)

    profiles = generate_user_profiles(demographics, process_df, domain_df)
    return profiles, demo_fig, behavior_fig

if __name__ == "__main__":
    # Script entry point: run the whole pipeline. The returned profiles and
    # figures are discarded here; import the module and call analyze() to
    # use them programmatically.
    analyze()