import pandas as pd
import numpy as np
import jieba
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, precision_recall_fscore_support, confusion_matrix
import torch
from transformers import BertTokenizer, BertModel
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
import spacy
import networkx as nx
from sklearn.decomposition import PCA
import warnings
from gensim import corpora, models
import pyLDAvis.gensim_models
import pyLDAvis
from datetime import datetime
import matplotlib.dates as mdates
from wordcloud import WordCloud
warnings.filterwarnings('ignore')

# Configure matplotlib so Chinese (CJK) text renders correctly
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei supplies CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering with a CJK font

# Chinese stop-word list consumed by the jieba tokenisation helpers below.
# NOTE(review): the literal contains duplicate entries (harmless in a set).
STOP_WORDS = {'不是', '而且', '这样', '点', '送', '以后', '能', '一天', '什么', '这个', '一下', '后', '然后', '多', '东西', '有点', '真的', '非常', '可以', '还', '就是', '没', '不过', '其他', '没有', '比较', '希望', '一个', '会', '要', '还是', '才', '比', '知道', '不', '的', '了', '和', '是', '就', '都', '而', '及', '与', '这', '那', '有', '在', '中', '上', '下', '个', '为', '以', '到', '说', '对', '等', '着', '也', '但', '并', '很', '又', '或', '把', '让', '给', '从', '向', '被', '它', '她', '他', '我', '你', '们', '它', '她', '他', '我', '你', '们', '啊', '吧', '呢', '吗', '哦', '哈', '啦', '呀', '嘛', '呗', '喽', '咚', '咦', '唉', '哎', '嗨', '喂', '嗯', '哼', '嘿', '嗨', '哟', '呵', '嘻', '哇', '咦', '唉', '哎', '嗨', '喂', '嗯', '哼', '嘿', '嗨', '哟', '呵', '嘻', '哇'}

# Load the raw JD review data at import time; later code expects the columns
# 'content', 'score', 'user_level_name' and 'creation_time'.
df = pd.read_csv('京东评论数据.csv')

# 数据预处理函数
def clean_text(text):
    """
    清洗文本数据
    """
    if isinstance(text, str):
        # 移除特殊字符和表情
        text = re.sub(r'[^\w\s]', '', text)
        # 移除数字
        text = re.sub(r'\d+', '', text)
        return text.strip()
    return ''

# Clean every review; 'cleaned_content' feeds all downstream tokenisation.
df['cleaned_content'] = df['content'].apply(clean_text)

# 分词函数定义
def jieba_cut(text):
    """Tokenise *text* with jieba's accurate mode and drop stop words."""
    return [token for token in jieba.cut(text) if token not in STOP_WORDS]

def jieba_cut_for_search(text):
    """Tokenise *text* with jieba's search-engine mode and drop stop words."""
    return [token for token in jieba.cut_for_search(text) if token not in STOP_WORDS]

# Tokenise with both jieba modes so they can be compared per user level later.
df['jieba_words'] = df['cleaned_content'].apply(jieba_cut)
df['jieba_search_words'] = df['cleaned_content'].apply(jieba_cut_for_search)

# 词云图生成函数
def generate_wordcloud(words_list, title, save_path):
    """
    生成词云图
    """
    # 将词列表转换为文本
    text = ' '.join([word for words in words_list for word in words])
    
    # 创建词云对象
    wordcloud = WordCloud(
        font_path='simhei.ttf',  # 使用黑体字体
        width=800,
        height=400,
        background_color='white',
        max_words=100,
        max_font_size=150,
        random_state=42
    )
    
    # 生成词云
    wordcloud.generate(text)
    
    # 创建图形
    plt.figure(figsize=(12, 6))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')
    plt.title(title, fontsize=14, pad=15)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()

# 用户等级词频分析函数
def analyze_user_level_word_frequency():
    """
    按用户等级分析词频分布
    """
    # 获取所有用户等级
    user_levels = df['user_level_name'].unique()
    
    # 为每个用户等级创建词云图
    for level in user_levels:
        # 获取该等级用户的评论
        level_comments = df[df['user_level_name'] == level]
        
        # 精确模式分词
        level_words_precise = level_comments['cleaned_content'].apply(jieba_cut)
        # 搜索引擎模式分词
        level_words_search = level_comments['cleaned_content'].apply(jieba_cut_for_search)
        
        # 生成词云图
        generate_wordcloud(level_words_precise, f'{level}用户-精确模式词云图', f'词云图-{level}用户-精确模式.png')
        generate_wordcloud(level_words_search, f'{level}用户-搜索引擎模式词云图', f'词云图-{level}用户-搜索引擎模式.png')
        
        # 保存该等级用户的评论数据
        level_comments.to_csv(f'processed_jd_comments_{level}.csv', index=False)
        
        # 输出统计信息
        print(f"\n{level}用户统计信息：")
        print(f"评论数量：{len(level_comments)}")
        print(f"平均评分：{level_comments['score'].mean():.2f}")
        print(f"评论字数统计：")
        print(level_comments['cleaned_content'].str.len().describe())
        
        # 输出分词模式对比
        print(f"\n{level}用户分词模式对比：")
        precise_words = set([word for words in level_words_precise for word in words])
        search_words = set([word for words in level_words_search for word in words])
        print("精确模式独有词：", precise_words - search_words)
        print("搜索引擎模式独有词：", search_words - precise_words)

# Text vectorisation (runs at import time)
# 1. TF-IDF features over the cleaned reviews (top 1000 terms)
tfidf = TfidfVectorizer(max_features=1000)
tfidf_matrix = tfidf.fit_transform(df['cleaned_content'])

# 2. Pretrained Chinese BERT — downloads the weights on first run
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
bert_model = BertModel.from_pretrained('bert-base-chinese')

# 自定义数据集类
class TextDataset(Dataset):
    """
    文本数据集类，用于BERT模型训练
    """
    def __init__(self, texts, tokenizer, max_len=128):
        self.texts = texts
        self.tokenizer = tokenizer
        self.max_len = max_len
        
    def __len__(self):
        return len(self.texts)
    
    def __getitem__(self, idx):
        text = str(self.texts[idx])
        encoding = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt'
        )
        
        return {
            'input_ids': encoding['input_ids'].flatten(),
            'attention_mask': encoding['attention_mask'].flatten()
        }

# Dataset / loader over every cleaned review (inference only, no shuffle)
dataset = TextDataset(df['cleaned_content'].values, tokenizer)
dataloader = DataLoader(dataset, batch_size=32)

# 获取BERT向量表示
def get_bert_embeddings(model, dataloader):
    """
    获取文本的BERT向量表示
    """
    model.eval()
    embeddings = []
    with torch.no_grad():
        for batch in dataloader:
            input_ids = batch['input_ids']
            attention_mask = batch['attention_mask']
            outputs = model(input_ids, attention_mask=attention_mask)
            embeddings.append(outputs.last_hidden_state[:, 0, :].numpy())
    return np.vstack(embeddings)

# Encode every review once with BERT ([CLS] vectors; computed at import time)
bert_embeddings = get_bert_embeddings(bert_model, dataloader)

# 双向LSTM模型定义
class BiLSTM(nn.Module):
    """
    双向LSTM模型，用于文本分类
    """
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(BiLSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)
        self.fc = nn.Linear(hidden_size * 2, num_classes)
        self.attention = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1)
        )
        
    def forward(self, x):
        # 确保输入维度正确
        if len(x.shape) == 2:
            x = x.unsqueeze(1)
        
        batch_size = x.size(0)
        h0 = torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).to(x.device)
        c0 = torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).to(x.device)
        
        # LSTM前向传播
        out, _ = self.lstm(x, (h0, c0))
        
        # 注意力机制
        attention_weights = self.attention(out)
        attention_weights = torch.softmax(attention_weights, dim=1)
        out = torch.sum(out * attention_weights, dim=1)
        
        # 全连接层
        out = self.fc(out)
        return out, attention_weights

# LSTM模型训练和评估函数
def train_and_evaluate_lstm(X_train, X_test, y_train, y_test, input_size=1000, hidden_size=128, num_layers=2, num_classes=6):
    """
    训练和评估LSTM模型
    参数:
        X_train, X_test: 训练集和测试集特征
        y_train, y_test: 训练集和测试集标签
        input_size: 输入维度
        hidden_size: 隐藏层大小
        num_layers: LSTM层数
        num_classes: 分类类别数
    返回:
        模型评估结果
    """
    # 设置设备
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")
    
    # 创建模型
    model = BiLSTM(input_size, hidden_size, num_layers, num_classes).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    
    # 准备数据
    # 检查输入类型并相应处理
    if hasattr(X_train, 'toarray'):
        X_train_tensor = torch.FloatTensor(X_train.toarray()).to(device)
        X_test_tensor = torch.FloatTensor(X_test.toarray()).to(device)
    else:
        X_train_tensor = torch.FloatTensor(X_train).to(device)
        X_test_tensor = torch.FloatTensor(X_test).to(device)
    
    y_train_tensor = torch.LongTensor(y_train.values - 1).to(device)
    y_test_tensor = torch.LongTensor(y_test.values - 1).to(device)
    
    # 训练参数
    num_epochs = 10
    train_losses = []
    test_accuracies = []
    best_accuracy = 0
    patience = 3
    patience_counter = 0
    
    # 训练循环
    for epoch in range(num_epochs):
        model.train()
        total_loss = 0
        optimizer.zero_grad()
        
        # 前向传播
        outputs, _ = model(X_train_tensor)
        loss = criterion(outputs, y_train_tensor)
        
        # 反向传播
        loss.backward()
        optimizer.step()
        
        total_loss = loss.item()
        train_losses.append(total_loss)
        
        # 评估模型
        model.eval()
        with torch.no_grad():
            outputs, _ = model(X_test_tensor)
            _, predicted = torch.max(outputs.data, 1)
            accuracy = (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0) * 100
            test_accuracies.append(accuracy)
            
            # 早停检查
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                patience_counter = 0
            else:
                patience_counter += 1
                
            if patience_counter >= patience:
                print(f"Early stopping at epoch {epoch+1}")
                break
        
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {total_loss:.4f}, Accuracy: {accuracy:.2f}%')
    
    # 绘制训练过程
    plt.figure(figsize=(12, 5))
    
    # 绘制损失曲线
    plt.subplot(1, 2, 1)
    plt.plot(train_losses, label='训练损失')
    plt.title('训练损失曲线')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)
    
    # 绘制准确率曲线
    plt.subplot(1, 2, 2)
    plt.plot(test_accuracies, label='测试准确率')
    plt.title('测试准确率曲线')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.legend()
    plt.grid(True)
    
    plt.tight_layout()
    plt.savefig('lstm_training_curves.png', dpi=300, bbox_inches='tight')
    plt.close()
    
    # 最终评估
    model.eval()
    with torch.no_grad():
        outputs, attention_weights = model(X_test_tensor)
        _, predicted = torch.max(outputs.data, 1)
        
        # 计算评估指标
        precision, recall, f1, _ = precision_recall_fscore_support(
            y_test_tensor.cpu().numpy(), 
            predicted.cpu().numpy(), 
            average='weighted'
        )
        
        # 绘制混淆矩阵
        plt.figure(figsize=(8, 6))
        cm = confusion_matrix(y_test_tensor.cpu().numpy(), predicted.cpu().numpy())
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
        plt.title('BiLSTM模型混淆矩阵')
        plt.xlabel('预测标签')
        plt.ylabel('真实标签')
        plt.savefig('lstm_confusion_matrix.png', dpi=300, bbox_inches='tight')
        plt.close()
        
        # 绘制注意力权重热力图
        plt.figure(figsize=(10, 6))
        attention_weights_mean = attention_weights.mean(dim=0).cpu().numpy()
        sns.heatmap(attention_weights_mean, cmap='YlOrRd')
        plt.title('BiLSTM注意力权重热力图')
        plt.xlabel('序列位置')
        plt.ylabel('样本')
        plt.savefig('lstm_attention_heatmap.png', dpi=300, bbox_inches='tight')
        plt.close()
    
    # 返回评估结果
    return {
        'accuracy': accuracy / 100,
        'precision': precision,
        'recall': recall,
        'f1': f1,
        'final_loss': train_losses[-1],
        'best_accuracy': best_accuracy / 100
    }

# 注意力权重可视化函数
def plot_attention_heatmap(attention_weights, text, title):
    """
    绘制注意力权重热力图
    """
    plt.figure(figsize=(10, 6))
    sns.heatmap(attention_weights, cmap='YlOrRd', annot=True, fmt='.2f')
    plt.title(title, fontsize=14, pad=15)
    plt.xlabel('注意力头', fontsize=12)
    plt.ylabel('序列位置', fontsize=12)
    plt.tight_layout()
    plt.savefig(f'{title}.png', dpi=300, bbox_inches='tight')
    plt.close()

# 依存句法树可视化函数
def plot_dependency_tree(text):
    """
    绘制依存句法树
    """
    nlp = spacy.load('zh_core_web_sm')
    doc = nlp(text)
    
    G = nx.Graph()
    for token in doc:
        G.add_node(token.text)
        if token.head.text != token.text:
            G.add_edge(token.head.text, token.text)
    
    plt.figure(figsize=(12, 8))
    pos = nx.spring_layout(G)
    nx.draw(G, pos, with_labels=True, node_color='lightblue', 
            node_size=1500, font_size=10, font_weight='bold')
    plt.title('依存句法树', fontsize=14, pad=15)
    plt.axis('on')
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.tight_layout()
    plt.savefig('dependency_tree.png', dpi=300, bbox_inches='tight')
    plt.close()

# LDA主题分析函数
def perform_lda_analysis(texts, num_topics=5):
    """
    进行LDA主题分析
    """
    # 分词
    texts_cut = [jieba.lcut(text) for text in texts]
    
    # 创建词典
    dictionary = corpora.Dictionary(texts_cut)
    
    # 过滤极端频率的词
    dictionary.filter_extremes(no_below=2, no_above=0.5)
    
    # 创建语料库
    corpus = [dictionary.doc2bow(text) for text in texts_cut]
    
    # 训练LDA模型
    lda_model = models.LdaModel(
        corpus=corpus,
        id2word=dictionary,
        num_topics=num_topics,
        random_state=42,
        passes=10
    )
    
    # 可视化主题
    vis = pyLDAvis.gensim_models.prepare(lda_model, corpus, dictionary)
    pyLDAvis.save_html(vis, 'lda_visualization.html')
    
    # 获取每个文档的主题分布
    doc_topics = []
    for doc in corpus:
        topic_dist = lda_model.get_document_topics(doc)
        doc_topics.append(topic_dist)
    
    return lda_model, doc_topics

# 评论分布分析函数
def analyze_comment_distributions(df):
    """
    分析评论的评分分布、时间分布和评论长度与评分的关系
    """
    # 创建图形
    fig = plt.figure(figsize=(15, 10))
    
    # 1. 评分分布
    plt.subplot(2, 2, 1)
    sns.histplot(data=df, x='score', bins=5, discrete=True)
    plt.title('评分分布情况')
    plt.xlabel('评分')
    plt.ylabel('评论数量')
    plt.grid(True, linestyle='--', alpha=0.7)
    
    # 2. 评论发布时间分布
    plt.subplot(2, 2, 2)
    df['hour'] = pd.to_datetime(df['creation_time']).dt.hour
    df['day'] = pd.to_datetime(df['creation_time']).dt.day_name()
    time_dist = pd.crosstab(df['day'], df['hour'])
    sns.heatmap(time_dist, cmap='YlOrRd', annot=True, fmt='d')
    plt.title('评论发布时间分布')
    plt.xlabel('小时')
    plt.ylabel('星期')
    
    # 3. 评论长度与评分关系
    plt.subplot(2, 2, 3)
    df['comment_length'] = df['cleaned_content'].str.len()
    sns.boxplot(data=df, x='score', y='comment_length')
    plt.title('评论长度与评分关系')
    plt.xlabel('评分')
    plt.ylabel('评论长度')
    plt.grid(True, linestyle='--', alpha=0.7)
    
    # 4. 评论长度分布
    plt.subplot(2, 2, 4)
    sns.histplot(data=df, x='comment_length', bins=50)
    plt.title('评论长度分布')
    plt.xlabel('评论长度')
    plt.ylabel('评论数量')
    plt.grid(True, linestyle='--', alpha=0.7)
    
    plt.tight_layout()
    plt.savefig('comment_distributions.png', dpi=300, bbox_inches='tight')
    plt.close()
    
    # 输出统计信息
    print("\n评论分布统计信息：")
    print("\n评分统计：")
    print(df['score'].describe())
    print("\n评论长度统计：")
    print(df['comment_length'].describe())
    print("\n各评分段的评论数量：")
    print(df['score'].value_counts().sort_index())
    
    return df

# 时序分析函数
def analyze_temporal_patterns(df):
    """
    分析评论的时序模式
    """
    # 转换时间格式
    df['creation_time'] = pd.to_datetime(df['creation_time'])
    
    # 按日期统计评论数量
    daily_counts = df.groupby(df['creation_time'].dt.date).size()
    
    # 按日期统计平均评分
    daily_scores = df.groupby(df['creation_time'].dt.date)['score'].mean()
    
    # 创建时序图
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(15, 10))
    
    # 评论数量时序图
    ax1.plot(daily_counts.index, daily_counts.values, marker='o')
    ax1.set_title('每日评论数量变化')
    ax1.set_xlabel('日期')
    ax1.set_ylabel('评论数量')
    ax1.grid(True)
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    plt.setp(ax1.xaxis.get_majorticklabels(), rotation=45)
    
    # 评分时序图
    ax2.plot(daily_scores.index, daily_scores.values, marker='o', color='orange')
    ax2.set_title('每日平均评分变化')
    ax2.set_xlabel('日期')
    ax2.set_ylabel('平均评分')
    ax2.grid(True)
    ax2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    plt.setp(ax2.xaxis.get_majorticklabels(), rotation=45)
    
    plt.tight_layout()
    plt.savefig('temporal_analysis.png', dpi=300, bbox_inches='tight')
    plt.close()
    
    # 按时间段进行LDA分析
    df['time_period'] = pd.qcut(df['creation_time'], q=4, labels=['Q1', 'Q2', 'Q3', 'Q4'])
    
    for period in df['time_period'].unique():
        period_texts = df[df['time_period'] == period]['cleaned_content'].tolist()
        lda_model, doc_topics = perform_lda_analysis(period_texts)
        
        # 保存每个时期的主题词
        topics_df = pd.DataFrame({
            'Topic': range(1, len(lda_model.print_topics()) + 1),
            'Keywords': [lda_model.print_topic(i) for i in range(len(lda_model.print_topics()))]
        })
        topics_df.to_csv(f'topics_period_{period}.csv', index=False, encoding='utf-8-sig')
    
    return daily_counts, daily_scores

# 主函数
def main():
    """
    主函数，执行所有分析流程
    """
    global df  # 声明df为全局变量
    
    # 示例：对第一条评论进行依存树分析
    if len(df) > 0:
        plot_dependency_tree(df['cleaned_content'].iloc[0])
    
    # 进行评论分布分析
    print("\n开始评论分布分析...")
    df = analyze_comment_distributions(df)
    
    # 进行用户等级分组分析
    analyze_user_level_word_frequency()
    
    # 进行LDA主题分析
    print("\n开始LDA主题分析...")
    lda_model, doc_topics = perform_lda_analysis(df['cleaned_content'].tolist())
    
    # 进行时序分析
    print("\n开始时序分析...")
    daily_counts, daily_scores = analyze_temporal_patterns(df)
    
    # 准备标签数据
    y = df['score']  # 使用评分作为标签
    
    # 1. 使用TF-IDF特征
    print("\n使用TF-IDF特征进行BiLSTM模型训练...")
    tfidf = TfidfVectorizer(max_features=1000)
    X_tfidf = tfidf.fit_transform(df['cleaned_content'])
    X_train_tfidf, X_test_tfidf, y_train, y_test = train_test_split(X_tfidf, y, test_size=0.2, random_state=42)
    
    # 训练和评估使用TF-IDF特征的BiLSTM模型
    lstm_results_tfidf = train_and_evaluate_lstm(X_train_tfidf, X_test_tfidf, y_train, y_test, input_size=1000)
    
    # 2. 使用BERT特征
    print("\n使用BERT特征进行BiLSTM模型训练...")
    # 获取BERT向量
    bert_embeddings = get_bert_embeddings(bert_model, dataloader)
    X_train_bert, X_test_bert, y_train, y_test = train_test_split(bert_embeddings, y, test_size=0.2, random_state=42)
    
    # 训练和评估使用BERT特征的BiLSTM模型
    lstm_results_bert = train_and_evaluate_lstm(X_train_bert, X_test_bert, y_train, y_test, input_size=768)  # BERT base的隐藏层维度是768
    
    # 保存评估结果
    results_df = pd.DataFrame({
        'Model': ['BiLSTM-TFIDF', 'BiLSTM-BERT'],
        'Accuracy': [lstm_results_tfidf['accuracy'], lstm_results_bert['accuracy']],
        'Precision': [lstm_results_tfidf['precision'], lstm_results_bert['precision']],
        'Recall': [lstm_results_tfidf['recall'], lstm_results_bert['recall']],
        'F1': [lstm_results_tfidf['f1'], lstm_results_bert['f1']],
        'Final_Loss': [lstm_results_tfidf['final_loss'], lstm_results_bert['final_loss']],
        'Best_Accuracy': [lstm_results_tfidf['best_accuracy'], lstm_results_bert['best_accuracy']]
    })
    results_df.to_csv('lstm_evaluation_results.csv', index=False)
    
    # 绘制模型对比图
    plt.figure(figsize=(12, 6))
    metrics = ['Accuracy', 'Precision', 'Recall', 'F1']
    x = np.arange(len(metrics))
    width = 0.35
    
    plt.bar(x - width/2, results_df.iloc[0][metrics], width, label='BiLSTM-TFIDF')
    plt.bar(x + width/2, results_df.iloc[1][metrics], width, label='BiLSTM-BERT')
    
    plt.xlabel('评估指标')
    plt.ylabel('得分')
    plt.title('BiLSTM模型性能对比')
    plt.xticks(x, metrics)
    plt.legend()
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.tight_layout()
    plt.savefig('model_comparison.png', dpi=300, bbox_inches='tight')
    plt.close()
    
    # 保存处理后的数据
    df.to_csv('processed_jd_comments.csv', index=False)
    
    print("\n分析完成！所有结果已保存。")

# Script entry point: run the full analysis pipeline.
if __name__ == "__main__":
    main()