from flask import Flask, render_template, request, jsonify, send_file
import jieba
import jieba.analyse
from snownlp import SnowNLP
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import io
import base64
import json
from datetime import datetime, timedelta
import random
import os

app = Flask(__name__)

# Mock data generation
def generate_mock_data():
    """Generate 100 mock web-text records for the dashboard.

    Returns:
        list[dict]: records with keys ``id`` (1-based int), ``text``
        (one of 8 fixed Chinese sentences), ``date`` (``YYYY-MM-DD``
        within the last 30 days), ``source`` (one of 5 channel names),
        and ``sentiment`` (SnowNLP polarity in [0, 1], rounded to 3 dp).
    """
    texts = [
        "新疆维吾尔自治区博物馆的文物展览真是太精彩了，特别是那些丝绸之路的文物",
        "今天参观了新疆博物馆，对新疆的历史文化有了更深的了解",
        "新疆博物馆的展品非常丰富，工作人员服务态度也很好",
        "新疆维吾尔自治区博物馆的数字化展示很有特色",
        "新疆博物馆的文创产品很有创意，买了几个纪念品",
        "新疆的历史文化在新疆博物馆得到了很好的展示",
        "新疆博物馆的展览设计很专业，值得再次参观",
        "新疆维吾尔自治区博物馆是了解新疆历史文化的重要窗口"
    ]

    # SnowNLP scoring is comparatively slow and deterministic per string,
    # so compute each unique text's sentiment once instead of once per
    # generated record (100 scorings -> 8).
    sentiment_by_text = {t: round(SnowNLP(t).sentiments, 3) for t in texts}

    sources = ['微博', '新闻', '博客', '论坛', '微信公众号']

    data = []
    for i in range(100):
        text = random.choice(texts)
        date = datetime.now() - timedelta(days=random.randint(0, 30))

        data.append({
            'id': i + 1,
            'text': text,
            'date': date.strftime('%Y-%m-%d'),
            'source': random.choice(sources),
            'sentiment': sentiment_by_text[text]
        })

    return data

# Global data store: mock records generated once at import time and
# read (never mutated) by every route below.
mock_data = generate_mock_data()

@app.route('/')
def index():
    """Serve the landing page template."""
    page = render_template('index.html')
    return page

@app.route('/dashboard')
def dashboard():
    """Serve the analytics dashboard template."""
    page = render_template('dashboard.html')
    return page

@app.route('/api/data')
def get_data():
    """Return the analysis records, filtered by optional query params.

    Supported query arguments:
        start_date / end_date -- inclusive bounds, compared as
            ``YYYY-MM-DD`` strings (lexicographic order matches
            chronological order for this format).
        source -- exact source name; the literal ``'all'`` disables
            the filter.
        keyword -- substring match against the record text.
    """
    start_date = request.args.get('start_date')
    end_date = request.args.get('end_date')
    source = request.args.get('source')
    keyword = request.args.get('keyword')

    def matches(record):
        # Each absent/neutral parameter leaves its condition inactive.
        if start_date and record['date'] < start_date:
            return False
        if end_date and record['date'] > end_date:
            return False
        if source and source != 'all' and record['source'] != source:
            return False
        if keyword and keyword not in record['text']:
            return False
        return True

    return jsonify([record for record in mock_data if matches(record)])

@app.route('/api/sentiment-trend')
def get_sentiment_trend():
    """Return daily mean sentiment as [{'date': 'YYYY-MM-DD', 'sentiment': float}]."""
    frame = pd.DataFrame(mock_data)
    frame['date'] = pd.to_datetime(frame['date'])

    # Average the sentiment scores of all records sharing a date.
    trend = frame.groupby('date', as_index=False)['sentiment'].mean()
    trend['date'] = trend['date'].dt.strftime('%Y-%m-%d')

    return jsonify(trend.to_dict('records'))

@app.route('/api/keywords')
def get_keywords():
    """Return the top-20 TF-IDF keywords as {word: integer weight}."""
    corpus = ' '.join(record['text'] for record in mock_data)

    # jieba returns (word, float weight) pairs ranked by TF-IDF.
    tagged = jieba.analyse.extract_tags(corpus, topK=20, withWeight=True)

    # Scale the float weights into integers for the front-end word cloud.
    frequencies = {}
    for word, weight in tagged:
        frequencies[word] = int(weight * 1000)

    return jsonify(frequencies)

@app.route('/api/wordcloud')
def get_wordcloud():
    """Render a word-cloud PNG (800x400) from all collected texts.

    Returns:
        flask.Response: ``image/png`` body streamed from memory.
    """
    corpus = ' '.join(record['text'] for record in mock_data)

    # 'simhei.ttf' supplies CJK glyphs; without it the default font
    # renders Chinese characters as boxes, so fall back gracefully.
    cloud = WordCloud(
        width=800,
        height=400,
        background_color='white',
        font_path='simhei.ttf' if os.path.exists('simhei.ttf') else None,
        max_words=50
    ).generate(corpus)

    # Serialize the PIL image directly. The previous pyplot round-trip
    # (figure -> imshow -> savefig) only resampled the already-rendered
    # bitmap, and pyplot keeps module-global state that is not safe to
    # touch from concurrent Flask request handlers.
    buffer = io.BytesIO()
    cloud.to_image().save(buffer, format='PNG')
    buffer.seek(0)
    return send_file(buffer, mimetype='image/png')

@app.route('/api/sources')
def get_sources():
    """Return the number of records per data source as {source: count}."""
    # A single counting pass is enough for this list of dicts; building
    # a pandas DataFrame just to call value_counts() was needless
    # overhead, and this matches the counting style used in
    # generate_report().
    counts = {}
    for record in mock_data:
        counts[record['source']] = counts.get(record['source'], 0) + 1

    return jsonify(counts)

@app.route('/api/report')
def generate_report():
    """Build a JSON summary report over the whole dataset.

    Report fields: total record count, mean sentiment, a three-bucket
    sentiment distribution, per-source counts, and a generation
    timestamp.
    """
    total_texts = len(mock_data)
    # Guard the mean so an empty dataset cannot raise ZeroDivisionError.
    avg_sentiment = (
        sum(d['sentiment'] for d in mock_data) / total_texts
        if total_texts else 0.0
    )

    # Sentiment buckets: > 0.6 positive, 0.4-0.6 neutral, < 0.4 negative.
    positive = sum(1 for d in mock_data if d['sentiment'] > 0.6)
    neutral = sum(1 for d in mock_data if 0.4 <= d['sentiment'] <= 0.6)
    negative = sum(1 for d in mock_data if d['sentiment'] < 0.4)

    # Per-source record counts.
    sources = {}
    for d in mock_data:
        sources[d['source']] = sources.get(d['source'], 0) + 1

    report = {
        'total_texts': total_texts,
        'avg_sentiment': round(avg_sentiment, 3),
        'sentiment_distribution': {
            'positive': positive,
            'neutral': neutral,
            'negative': negative
        },
        'sources': sources,
        'generated_at': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    }

    return jsonify(report)

if __name__ == '__main__':
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug interactive debugger (arbitrary code execution) to the
    # whole network — keep this for local development only and disable
    # debug before any deployment.
    app.run(debug=True, host='0.0.0.0', port=5000)
