# -*- coding: utf-8 -*-
"""
用户画像分析系统 - 最终修正版

"""

# ==================== 1. 导入所有必需库 ====================
from collections import Counter
from wordcloud import WordCloud
import pandas as pd
import numpy as np
import jieba
import re
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import os

# Matplotlib setup so Chinese labels render correctly
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font supplies CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign renderable under a CJK font


# ==================== 2. 数据加载与预处理 ====================
def load_and_clean_data(eval_path='warehouse_evaluate.csv',
                        scenery_path='warehouse_scenery.csv',
                        poi_path='warehouse_xian_poi.csv'):
    """Load the three source CSVs and clean them for profiling.

    Parameters
    ----------
    eval_path, scenery_path, poi_path : str or file-like
        Sources for the review, scenery and POI tables. Defaults are the
        original hard-coded filenames, so existing callers are unaffected;
        anything ``pd.read_csv`` accepts (paths, buffers) works.

    Returns
    -------
    (df_eval, df_scenery) : tuple of DataFrame
        ``df_eval`` has non-null string ``user_name``, parsed ``send_time``
        and a derived ``review_year`` column; ``df_scenery`` carries POI
        metadata and a numeric ``score`` with unparseable rows dropped.
    """
    df_eval = pd.read_csv(eval_path)
    df_scenery = pd.read_csv(scenery_path)
    df_poi = pd.read_csv(poi_path)

    # Drop rows with missing user names and normalize the rest to str
    df_eval = df_eval.dropna(subset=['user_name'])
    df_eval['user_name'] = df_eval['user_name'].astype(str)

    # Attach POI metadata (tag/type/coordinates) to each scenery row;
    # the redundant join key 'name' is dropped after the merge
    df_scenery = df_scenery.merge(
        df_poi[['name', 'tag', 'type', 'lng', 'lat']],
        left_on='scenery_name', right_on='name',
        how='left'
    ).drop(columns=['name'])

    # Coerce scores to numeric and discard rows that fail to parse
    df_scenery['score'] = pd.to_numeric(df_scenery['score'], errors='coerce')
    df_scenery = df_scenery.dropna(subset=['score'])

    # Parse review timestamps, drop invalid ones, derive the review year
    df_eval['send_time'] = pd.to_datetime(df_eval['send_time'], errors='coerce')
    df_eval = df_eval.dropna(subset=['send_time'])
    df_eval['review_year'] = df_eval['send_time'].dt.year

    return df_eval, df_scenery


# ==================== 3. 用户画像构建 ====================
class UserProfiler:
    """Builds per-user profiles from review and scenery data.

    A profile combines interest keywords (TF-IDF + KMeans clustering),
    an estimated spending level, a lexicon-based sentiment score and
    activity statistics. Results are written to ``user_profiles.csv``
    plus two PNG visualizations.
    """

    def __init__(self, df_eval, df_scenery):
        self.df_eval = df_eval        # review records: user_name, content, send_time, eid, ...
        self.df_scenery = df_scenery  # scenery records with a numeric 'score' column
        self.user_profiles = {}       # user_name -> profile dict, filled by build_profiles()

    def _clean_text(self, text):
        """Strip punctuation/special characters, keeping word characters,
        whitespace and CJK; returns '' for non-string input."""
        if not isinstance(text, str):
            return ''
        text = re.sub(r'[^\w\s\u4e00-\u9fa5]', '', text)
        return text.strip()

    def extract_keywords(self, text):
        """Tokenize Chinese text with jieba, keeping multi-character
        pure-CJK tokens only.

        Returns a space-joined keyword string; '' on empty input or
        tokenizer failure.
        """
        text = self._clean_text(text)
        if not text:
            return ''

        try:
            words = [word for word in jieba.lcut(text)
                     if len(word) > 1 and re.match(r'^[\u4e00-\u9fa5]+$', word)]
            return ' '.join(words)
        except Exception:  # was a bare except: must not swallow SystemExit/KeyboardInterrupt
            return ''

    def analyze_interests(self):
        """Cluster users by the TF-IDF vectors of their review keywords.

        Returns ``(user_keywords, fitted_vectorizer, fitted_kmeans)``;
        on failure returns an empty Series and two ``None``s.
        Side effect: saves 'user_interest_clusters.png'.
        """
        # Guard against null user names even though loading should drop them
        valid_users = self.df_eval[self.df_eval['user_name'].notna()]

        # One keyword "document" per user, built from all their reviews
        user_keywords = valid_users.groupby('user_name')['content'].apply(
            lambda x: ' '.join([self.extract_keywords(text) for text in x if pd.notna(text)])
        )

        # Drop users whose reviews produced no keywords
        user_keywords = user_keywords[user_keywords.str.len() > 0]

        tfidf = TfidfVectorizer(
            max_features=100,
            token_pattern=r'(?u)\b\w+\b',
            min_df=2  # ignore words that appear in a single document
        )
        try:
            X_tfidf = tfidf.fit_transform(user_keywords)

            # Reduce to 2D before clustering so the scatter plot reflects the fit
            pca = PCA(n_components=2)
            X_pca = pca.fit_transform(X_tfidf.toarray())
            kmeans = KMeans(n_clusters=3, n_init='auto').fit(X_pca)

            plt.figure(figsize=(10, 6))
            plt.scatter(X_pca[:, 0], X_pca[:, 1], c=kmeans.labels_, alpha=0.5)
            plt.title("用户兴趣聚类分布")
            plt.savefig('user_interest_clusters.png', bbox_inches='tight', dpi=300)
            plt.close()  # release the figure; it was never closed before (leak)

            return user_keywords, tfidf, kmeans
        except Exception as e:
            print(f"兴趣分析出错: {str(e)}")
            return pd.Series(dtype=object), None, None

    @staticmethod
    def _dominant_level(levels):
        """Most frequent non-null price level in ``levels``; '未知' when none."""
        known = levels.dropna()  # hoisted: the original computed dropna() twice
        if known.empty:
            return '未知'
        return Counter(known).most_common(1)[0][0]

    def analyze_consumption(self):
        """Estimate each user's spending level from scenery scores.

        Scores are ranked into tertiles labelled '低'/'中'/'高'; a user's
        level is the most common tertile among the sceneries they reviewed.
        Returns a Series indexed by user_name; empty Series on failure.
        """
        try:
            valid_data = self.df_scenery.dropna(subset=['score']).copy()
            # rank(method='first') makes the values unique so qcut edges are distinct
            valid_data['price_level'] = pd.qcut(
                valid_data['score'].rank(method='first'),
                q=3,
                labels=['低', '中', '高'],
                duplicates='drop'
            )

            user_price = self.df_eval.merge(
                valid_data[['scenery_name', 'price_level']],
                on='scenery_name',
                how='left'
            ).groupby('user_name')['price_level'].apply(self._dominant_level)
            return user_price
        except Exception as e:
            print(f"消费分析出错: {str(e)}")
            return pd.Series(dtype=object)

    def generate_wordcloud(self, interests):
        """Render a word cloud of the collected interest keywords.

        ``interests`` is a list of keyword strings; empty or blank input
        is skipped with a message. Saves 'user_interests_wordcloud.png'.
        """
        try:
            if not interests:
                print("无有效兴趣数据生成词云")
                return

            text = ' '.join([i for i in interests if i])
            if not text.strip():
                print("兴趣文本为空")
                return

            wc = WordCloud(
                font_path='simhei.ttf',
                width=800,
                height=400,
                background_color='white',
                collocations=False  # avoid duplicated word-pair artifacts
            ).generate(text)

            plt.figure(figsize=(12, 6))
            plt.imshow(wc, interpolation='bilinear')
            plt.axis('off')
            plt.savefig('user_interests_wordcloud.png', bbox_inches='tight', dpi=300)
            plt.close()  # release the figure; it was never closed before (leak)
        except Exception as e:
            print(f"生成词云出错: {str(e)}")

    def build_profiles(self):
        """Assemble the full profile for every user and persist the results.

        Combines interest clustering, consumption level, a small
        lexicon-based sentiment score and activity statistics, then writes
        'user_profiles.csv' and the interest word cloud.
        """
        user_keywords, tfidf, kmeans = self.analyze_interests()
        user_price = self.analyze_consumption()

        # Tiny sentiment lexicon: positive / negative cue words
        pos_words = ['好', '美', '满意', '推荐', '喜欢']
        neg_words = ['差', '无聊', '贵', '失望', '坑']

        def sentiment_score(text):
            """(#positive - #negative) / total hits, in [-1, 1]; 0 for empty text."""
            text = self._clean_text(text)
            if not text:
                return 0
            pos = sum(text.count(word) for word in pos_words)
            neg = sum(text.count(word) for word in neg_words)
            # epsilon keeps the division defined when no lexicon word matches
            return (pos - neg) / (pos + neg + 1e-6)

        self.df_eval['sentiment'] = self.df_eval['content'].apply(sentiment_score)
        user_sentiment = self.df_eval.groupby('user_name')['sentiment'].mean()

        # Per-user activity statistics
        user_activity = self.df_eval.groupby('user_name').agg(
            total_reviews=('eid', 'count'),
            last_review=('send_time', 'max'),
            review_years=('review_year', lambda x: len(set(x)))
        )
        # NOTE(review): assumes 'send_time' is timezone-naive like datetime.now() — confirm
        user_activity['days_since_last'] = (datetime.now() - user_activity['last_review']).dt.days

        all_interests = []
        for user in self.df_eval['user_name'].unique():
            try:
                # Top-5 TF-IDF keywords for this user; empty when vectorization failed
                # (explicit None checks replace truthiness tests on fitted estimators)
                if user in user_keywords.index and tfidf is not None:
                    feature_indices = tfidf.transform([user_keywords[user]]).nonzero()[1]
                    interests = dict(Counter(
                        tfidf.get_feature_names_out()[i] for i in feature_indices
                    ).most_common(5))
                else:
                    interests = {}

                profile = {
                    'interests': interests,
                    'interest_cluster': int(kmeans.labels_[user_keywords.index.get_loc(user)])
                    if user in user_keywords.index and kmeans is not None else -1,
                    'price_level': user_price.get(user, '未知'),
                    'sentiment': user_sentiment.get(user, 0),
                    'activity': {
                        'total_reviews': int(user_activity.loc[user, 'total_reviews']),
                        'active_years': int(user_activity.loc[user, 'review_years']),
                        'days_since_last': int(user_activity.loc[user, 'days_since_last'])
                    } if user in user_activity.index else {}
                }
                self.user_profiles[user] = profile

                # Pool keywords across users for the word cloud
                if interests:
                    all_interests.extend(interests.keys())
            except Exception as e:
                print(f"处理用户 {user} 时出错: {str(e)}")
                continue

        self.generate_wordcloud(all_interests)

        if self.user_profiles:
            pd.DataFrame.from_dict(self.user_profiles, orient='index').to_csv('user_profiles.csv')
            print(f"成功生成 {len(self.user_profiles)} 个用户画像")
        else:
            print("警告: 未生成任何有效用户画像")


# ==================== 4. 主执行流程 ====================
if __name__ == "__main__":
    # Load and clean the raw CSV data
    df_eval, df_scenery = load_and_clean_data()

    # Proceed only when both tables came back non-empty
    if not (df_eval.empty or df_scenery.empty):
        print(f"有效用户数据: {len(df_eval)} 条")
        print(f"有效景点数据: {len(df_scenery)} 条")

        # Build and persist the user profiles
        UserProfiler(df_eval, df_scenery).build_profiles()
    else:
        print("错误: 数据加载失败或数据为空")