#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
B站每周必看數據分析完整程序
整合爬蟲、數據清洗、分析、機器學習和可視化功能
"""

import requests
import json
import time
import pandas as pd
import numpy as np
import re
import os
import sys
import logging
from datetime import datetime
from tqdm import tqdm
import warnings
import concurrent.futures
import threading
import random
import matplotlib.pyplot as plt
import seaborn as sns
# NOTE(review): suppresses ALL warnings globally (pandas, sklearn, etc.);
# consider narrowing the filter to specific categories.
warnings.filterwarnings('ignore')

# Startup banner: tells the user how to refresh their Bilibili cookie when
# the API starts returning error code -352 (expired login / rate limiting).
print("\n" + "="*60)
print("🔧 B站每周必看數據分析程序")
print("="*60)
print("⚠️  重要提醒：如遇API返回-352錯誤，請更新你的B站Cookie！")
print("📋 獲取新Cookie步驟：")
print("   1. 用瀏覽器登錄網頁版B站 (https://www.bilibili.com)")
print("   2. 按F12打開開發者工具")
print("   3. 點擊 'Application' 或 '應用' 標籤")
print("   4. 左側找到 'Storage' > 'Cookies' > 'https://www.bilibili.com'")
print("   5. 找到並複製 'SESSDATA' 字段的值")
print("   6. 將新Cookie替換代碼中的舊Cookie")
print("   7. 重新運行程式")
print("="*60)
print("💡 提示：如果仍然失敗，請等待10-30分鐘後再試\n")

# Working-directory sanity check: the script must be run from the project
# root containing these files, since all output paths below are relative.
project_files = ['bilibili_analysis_complete.py', 'requirements.txt']
missing = [f for f in project_files if not os.path.exists(f)]
if missing:
    print("【錯誤】當前目錄缺少以下文件：", missing)
    print("請先進入你的項目目錄再運行本程序！")
    print("例如：cd C:\\Users\\a1780\\bilibili_weekly_analysis")
    sys.exit(1)

# Logging setup: timestamped INFO-level messages for the whole run.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class BilibiliWeeklyAnalyzer:
    def __init__(self):
        """Initialize the analyzer.

        Side effects:
            * configures a shared ``requests.Session`` with browser-like
              headers, a randomly chosen User-Agent, and a Bilibili login
              cookie (see security note below);
            * creates the output directories under the current working dir.
        """
        self.session = requests.Session()

        # Rotate between a few desktop User-Agent strings so requests look
        # less like an automated client.
        user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/91.0.864.59'
        ]

        selected_ua = random.choice(user_agents)

        self.session.headers.update({
            'User-Agent': selected_ua,
            'Referer': 'https://www.bilibili.com/',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            # ★★★ SECURITY NOTE(review): this is a hard-coded personal login
            # cookie — SESSDATA and bili_jct are live session credentials and
            # should be loaded from an environment variable or config file,
            # never committed to source control. Replace it with your own
            # cookie when the API starts returning -352. ★★★
            'Cookie': 'buvid3=367C7A74-C875-2FEB-8F80-4DFDF9338BAE63131infoc; b_nut=1729076263; _uuid=4CDFE1F1-210510-8B610-9FF5-104C31B265EB172230infoc; enable_web_push=DISABLE; buvid4=3A8EACB6-0423-33FB-0019-B47367A5447564750-024101610-777tDx3lv8hCOSFVfOWtnQ%3D%3D; DedeUserID=507857399; DedeUserID__ckMd5=bfddae2bdf917643; rpdid=|(J|Ykl~l|Y~0J\'u~km|~Yl~|; fingerprint=e75cd9a6efed828c4fba3c2427508195; buvid_fp_plain=undefined; buvid_fp=e75cd9a6efed828c4fba3c2427508195; hit-dyn-v2=1; enable_feed_channel=ENABLE; LIVE_BUVID=AUTO3917444656580234; header_theme_version=OPEN; theme-tip-show=SHOWED; theme-avatar-tip-show=SHOWED; blackside_state=0; CURRENT_BLACKGAP=0; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3NTI1ODY3MDksImlhdCI6MTc1MjMyNzQ0OSwicGx0IjotMX0.lZyQ58hqydaqahzlqQhgx7oSzSyOhEZXS3_fFbno0Qw; bili_ticket_expires=1752586649; SESSDATA=27e4135a%2C1768008780%2C5bd75%2A72CjCfZiMzJe4iapNFxCkLwX3WgjZKC4Wec3QE9vMaek-HgSMgveWrQJ-yBniUFYsFv8sSVkVTN0hvMGRjZkZDekhkcHgycXVUVXhEeWRONl9WOEhvUUlWWHo1MDJ5QzBDY2w4TExsUjlPbEdKWENTRFhBYUhoVTB3M2pzcll3ckd5cjVWZDNCS1lnIIEC; bili_jct=2dc5fe92c2f28ecc47200014c291f0ae; sid=8tuwb436; home_feed_column=4; browser_resolution=1280-693; bp_t_offset_507857399=1089408649220063232; CURRENT_FNVAL=4048; CURRENT_QUALITY=80; b_lsid=7D6DBC410_1980B9C7165'
        })

        logger.info(f"使用 User-Agent: {selected_ua[:50]}...")

        # Ensure all output directories exist. exist_ok=True avoids the
        # check-then-create race of an os.path.exists() guard; the pre-check
        # below exists only to keep the "created" log message behavior.
        self.directories = ['data', 'cleaned_data', 'analysis_results', 'ml_models', 'visualizations']
        for directory in self.directories:
            existed = os.path.isdir(directory)
            os.makedirs(directory, exist_ok=True)
            if not existed:
                logger.info(f"創建目錄: {directory}")

        # Serializes log output from the worker threads in get_video_info().
        self.lock = threading.Lock()
    
    def get_video_info(self, video, number, subject):
        """Fetch detail and stats for one video and flatten them into a record.

        Args:
            video: dict containing at least a 'bvid' key.
            number: weekly issue number the video belongs to.
            subject: title of that weekly issue.

        Returns:
            A flat dict of metadata and engagement counters, or None on any
            failure (HTTP error, non-zero API code, missing fields).
        """
        try:
            resp = self.session.get(
                "https://api.bilibili.com/x/web-interface/view",
                params={'bvid': video['bvid']},
                timeout=5,
            )
            resp.raise_for_status()
            payload = resp.json()
            if payload['code'] != 0:
                return None

            detail = payload['data']
            counters = detail['stat']

            record = {
                'bvid': video['bvid'],
                'aid': detail['aid'],
                'title': detail['title'],
                'desc': detail['desc'],
                'duration': detail['duration'],
            }
            # Engagement counters; 'dislike' is not always present in the
            # API response, so fall back to 0 when absent.
            for key in ('view', 'danmaku', 'reply', 'favorite', 'coin', 'share', 'like'):
                record[key] = counters[key]
            record['dislike'] = counters.get('dislike', 0)
            record.update({
                'owner_name': detail['owner']['name'],
                'owner_mid': detail['owner']['mid'],
                'pubdate': detail['pubdate'],
                'ctime': detail['ctime'],
                'weekly_number': number,
                'weekly_title': subject,
                'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            })

            # Hold the lock so concurrent workers don't interleave log lines.
            with self.lock:
                logger.info(f"成功爬取視頻: {record['title'][:30]}...")
            return record
        except Exception as e:
            logger.error(f"處理視頻 {video.get('bvid', 'unknown')} 時出錯: {e}")
            return None
    
    def crawl_weekly_data(self):
        """Crawl every issue of Bilibili's "weekly must-watch" series.

        Balanced mode: issues are fetched one at a time, but video details
        within each issue are fetched on an 8-worker thread pool with random
        delays — fast while staying fairly resistant to rate limiting.

        Returns:
            pandas.DataFrame with one row per video (also written to
            data/weekly_data_<timestamp>.csv), or None if nothing was fetched.
        """
        logger.info("=" * 50)
        logger.info("步驟 1: 開始平衡模式爬取B站每周必看所有期數數據")
        logger.info("=" * 50)
        all_videos = []
        no_data_numbers = []
        # Candidate endpoints for the issue index; tried in order until one
        # returns usable JSON. The ps= variants presumably request larger
        # page sizes — TODO confirm the API honours that parameter here.
        api_endpoints = [
            "https://api.bilibili.com/x/web-interface/popular/series/list",
            "https://api.bilibili.com/x/web-interface/popular/series/list?ps=50",
            "https://api.bilibili.com/x/web-interface/popular/series/list?ps=100"
        ]
        number_list = []
        for endpoint in api_endpoints:
            try:
                resp = self.session.get(endpoint, timeout=15)
                try:
                    # Guard against empty bodies before attempting JSON decode.
                    if not resp.text or resp.text.strip() == "":
                        logger.warning(f"API端點 {endpoint} 返回空內容，重試...")
                        continue
                    data = resp.json()
                except Exception as e:
                    logger.warning(f"API端點 {endpoint} 返回異常內容: {e}")
                    continue
                if data['code'] == 0 and 'data' in data and 'list' in data['data']:
                    # Keep (issue number, issue title) pairs.
                    number_list = [(item['number'], item.get('subject', '')) for item in data['data']['list']]
                    logger.info(f"成功獲取到{len(number_list)}期，每期標題示例：{number_list[:3]}")
                    break
                else:
                    logger.warning(f"API端點 {endpoint} 返回異常: {data}")
                    continue
            except Exception as e:
                logger.warning(f"API端點 {endpoint} 請求失敗: {e}")
                continue
        if not number_list:
            logger.error("所有API端點都失敗，無法獲取期數列表")
            return None
        total_processed = 0
        valid_episodes = 0
        for number, subject in tqdm(number_list, desc="爬取期數進度"):
            total_processed += 1
            success = False
            # Per-issue endpoints with increasing page sizes, each tried in
            # order within every retry round.
            episode_apis = [
                f"https://api.bilibili.com/x/web-interface/popular/series/one?number={number}&page_size=20",
                f"https://api.bilibili.com/x/web-interface/popular/series/one?number={number}&page_size=50",
                f"https://api.bilibili.com/x/web-interface/popular/series/one?number={number}&page_size=100"
            ]
            for attempt in range(8):  # up to 8 retry rounds per issue
                for api_url in episode_apis:
                    try:
                        response = self.session.get(api_url, timeout=15)
                        # Every failure path below backs off exponentially,
                        # capped at 2**5 = 32 seconds.
                        if response.status_code != 200:
                            logger.warning(f"第{number}期請求失敗（第{attempt+1}次），狀態碼: {response.status_code}")
                            time.sleep(2 ** min(attempt, 5))
                            continue
                        if not response.text or response.text.strip() == "":
                            logger.warning(f"第{number}期API返回空內容（第{attempt+1}次），重試...")
                            time.sleep(2 ** min(attempt, 5))
                            continue
                        # An HTML body usually means a risk-control/captcha
                        # page rather than JSON; dump it for inspection.
                        if response.text.strip().startswith("<!DOCTYPE html>") or "<html" in response.text.lower():
                            logger.error(f"第{number}期API返回HTML頁面，可能被風控/需要驗證，已保存到 html_error_{number}.html")
                            with open(f"html_error_{number}.html", "w", encoding="utf-8") as f:
                                f.write(response.text)
                            time.sleep(2 ** min(attempt, 5))
                            continue
                        try:
                            data = response.json()
                        except Exception as e:
                            # Non-JSON body: save raw text for debugging.
                            logger.error(f"第{number}期API返回非JSON內容（第{attempt+1}次），已保存到 error_{number}.txt")
                            with open(f"error_{number}.txt", "w", encoding="utf-8") as f:
                                f.write(response.text)
                            time.sleep(2 ** min(attempt, 5))
                            continue
                        if data['code'] != 0:
                            # -352 is Bilibili's auth/rate-limit error: wait
                            # longer instead of the normal backoff.
                            if data['code'] == -352:
                                logger.warning(f"第{number}期API返回-352錯誤（第{attempt+1}次），可能是Cookie過期或被限流")
                                if attempt < 2:
                                    logger.info("等待30秒後重試...")
                                    time.sleep(30)
                                else:
                                    logger.warning("連續-352錯誤，建議更新Cookie或稍後再試")
                                    time.sleep(60)
                            else:
                                logger.warning(f"第{number}期API返回錯誤碼: {data['code']}, 消息: {data.get('message', '')}")
                                time.sleep(2 ** min(attempt, 5))
                            continue
                        if not data.get('data', {}).get('list'):
                            # Empty list on the final round means the issue is
                            # probably not published yet.
                            if attempt == 7:
                                logger.warning(f"第{number}期暫未上線或無數據，請稍後再試")
                            else:
                                logger.warning(f"第{number}期無數據（第{attempt+1}次），重試...")
                            time.sleep(2 ** min(attempt, 5))
                            continue
                        success = True
                        valid_episodes += 1
                        weekly_data = data['data']
                        logger.info(f"第{number}期獲取到 {len(weekly_data['list'])} 個視頻")
                        # NOTE(review): this closure is a near-verbatim
                        # duplicate of self.get_video_info() — consider
                        # delegating to it. Unlike get_video_info(), it also
                        # logs WITHOUT holding self.lock.
                        def fetch_video_detail(video):
                            try:
                                # Try bvid first, then aid as a fallback key.
                                video_apis = [
                                    f"https://api.bilibili.com/x/web-interface/view?bvid={video['bvid']}",
                                    f"https://api.bilibili.com/x/web-interface/view?aid={video.get('aid', '')}"
                                ]
                                video_info = None
                                for video_api in video_apis:
                                    try:
                                        video_response = self.session.get(video_api, timeout=10)
                                        video_response.raise_for_status()
                                        video_data = video_response.json()
                                        if video_data['code'] == 0:
                                            video_info = video_data['data']
                                            break
                                    except Exception:
                                        continue
                                if not video_info:
                                    logger.warning(f"無法獲取視頻 {video.get('bvid', 'unknown')} 的詳細信息")
                                    return None
                                stat = video_info['stat']
                                video_record = {
                                    'bvid': video['bvid'],
                                    'aid': video_info['aid'],
                                    'title': video_info['title'],
                                    'desc': video_info['desc'],
                                    'duration': video_info['duration'],
                                    'view': stat['view'],
                                    'danmaku': stat['danmaku'],
                                    'reply': stat['reply'],
                                    'favorite': stat['favorite'],
                                    'coin': stat['coin'],
                                    'share': stat['share'],
                                    'like': stat['like'],
                                    'dislike': stat.get('dislike', 0),
                                    'owner_name': video_info['owner']['name'],
                                    'owner_mid': video_info['owner']['mid'],
                                    'pubdate': video_info['pubdate'],
                                    'ctime': video_info['ctime'],
                                    'weekly_number': number,
                                    'weekly_title': subject,
                                    'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                                }
                                logger.info(f"成功爬取視頻: {video_record['title'][:30]}...")
                                # Random per-video delay to soften the burst
                                # from the thread pool.
                                time.sleep(random.uniform(0.2, 1.0))
                                return video_record
                            except Exception as e:
                                logger.error(f"處理視頻 {video.get('bvid', 'unknown')} 時出錯: {e}")
                                return None
                        # Fan out the per-video detail requests (8 workers).
                        with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
                            results = list(executor.map(fetch_video_detail, weekly_data['list']))
                        for video_record in results:
                            if video_record:
                                all_videos.append(video_record)
                        break
                    except Exception as e:
                        logger.error(f"爬取第{number}期時出錯（第{attempt+1}次）: {e}")
                        time.sleep(2 ** min(attempt, 5))
                        continue
                if success:
                    break
            if not success:
                no_data_numbers.append(number)
            # NOTE(review): while valid_episodes is still 0, 0 % 10 == 0 is
            # true, so this progress line fires on every failed issue.
            if valid_episodes % 10 == 0:
                logger.info(f"進度統計: 已處理{total_processed}期，有效{valid_episodes}期，總視頻{len(all_videos)}個")
            # Longer pause every 20 issues to reduce rate-limiting risk.
            if total_processed % 20 == 0:
                logger.info("已處理20期，休息10秒避免被限流...")
                time.sleep(10)
            else:
                time.sleep(0.5)
        logger.info(f"爬取完成！總處理{total_processed}期，有效{valid_episodes}期，總視頻{len(all_videos)}個")
        if no_data_numbers:
            # NOTE(review): the message says "5 attempts" but the retry loop
            # above actually runs 8 rounds.
            logger.warning(f"以下期數連續5次無數據或暫未上線，請人工核查: {no_data_numbers}")
            with open('no_data_numbers.txt', 'w', encoding='utf-8') as f:
                f.write(','.join(str(n) for n in no_data_numbers))
        if all_videos:
            df = pd.DataFrame(all_videos)
            filename = f"weekly_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
            filepath = os.path.join('data', filename)
            # utf-8-sig so the CSV opens correctly in Excel.
            df.to_csv(filepath, index=False, encoding='utf-8-sig')
            logger.info(f"原始數據已保存: {filepath}")
            return df
        else:
            logger.warning("未獲取到任何數據，請檢查網絡連接和Cookie設置！")
            return None
    
    def clean_data(self, df):
        """Clean the raw crawl output.

        Steps, in order: drop duplicate bvids, fill missing values,
        normalize text fields, convert Unix timestamps, derive per-view
        engagement-rate features, and drop IQR outliers column by column.
        The result is saved to cleaned_data/ and charts are rendered.

        Returns:
            The cleaned DataFrame, or None when there is nothing to clean.
        """
        logger.info("=" * 50)
        logger.info("步驟 2: 開始數據清洗")
        logger.info("=" * 50)

        if df is None or len(df) == 0:
            logger.error("無數據可清洗")
            return None

        # Deduplicate on bvid, keeping the first occurrence.
        before = len(df)
        df = df.drop_duplicates(subset=['bvid'], keep='first')
        logger.info(f"移除重複數據: {before - len(df)} 條")

        # Missing numeric counters become 0; missing text becomes ''.
        numeric_cols = ['view', 'danmaku', 'reply', 'favorite', 'coin', 'share', 'like', 'dislike']
        text_cols = ['title', 'desc', 'owner_name']
        for col in (c for c in numeric_cols if c in df.columns):
            df[col] = df[col].fillna(0)
        for col in (c for c in text_cols if c in df.columns):
            df[col] = df[col].fillna('')

        def normalize(text):
            # Strip HTML tags, keep only word chars / whitespace / CJK,
            # then collapse runs of whitespace.
            if pd.isna(text) or text == '':
                return ''
            stripped = re.sub(r'<[^>]+>', '', str(text))
            stripped = re.sub(r'[^\w\s\u4e00-\u9fff]', '', stripped)
            return re.sub(r'\s+', ' ', stripped).strip()

        for col in (c for c in text_cols if c in df.columns):
            df[col] = df[col].apply(normalize)

        # Unix-second timestamps -> datetime, plus a formatted string column.
        for ts_col in ('pubdate', 'ctime'):
            if ts_col in df.columns:
                df[ts_col] = pd.to_datetime(df[ts_col], unit='s')
                df[ts_col + '_str'] = df[ts_col].dt.strftime('%Y-%m-%d %H:%M:%S')

        # Per-view engagement ratios; videos with zero views get 0.
        views = df['view']

        def per_view(series):
            return np.where(views > 0, series / views, 0)

        df['interaction_rate'] = per_view(df['like'] + df['coin'] + df['favorite'])
        df['like_rate'] = per_view(df['like'])
        df['coin_rate'] = per_view(df['coin'])
        df['favorite_rate'] = per_view(df['favorite'])
        df['share_rate'] = per_view(df['share'])
        df['danmaku_density'] = per_view(df['danmaku'])
        df['reply_density'] = per_view(df['reply'])

        if 'duration' in df.columns:
            df['duration_minutes'] = df['duration'] / 60

        # Drop IQR outliers one column at a time. Order matters: each pass
        # recomputes quartiles on the already-filtered frame.
        for col in ['view', 'like', 'coin', 'favorite', 'share', 'danmaku', 'reply']:
            if col not in df.columns:
                continue
            q1 = df[col].quantile(0.25)
            q3 = df[col].quantile(0.75)
            spread = q3 - q1
            lo = q1 - 1.5 * spread
            hi = q3 + 1.5 * spread

            before = len(df)
            df = df[(df[col] >= lo) & (df[col] <= hi)]
            dropped = before - len(df)
            if dropped > 0:
                logger.info(f"移除 {col} 異常值: {dropped} 條")

        # Persist the cleaned frame (utf-8-sig so Excel reads it correctly).
        filename = f"cleaned_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
        filepath = os.path.join('cleaned_data', filename)
        df.to_csv(filepath, index=False, encoding='utf-8-sig')
        logger.info(f"清洗後數據已保存: {filepath}")

        # Render charts from the freshly cleaned frame.
        self.create_visualizations(df)

        return df
    
    def analyze_data(self, df):
        """Run descriptive analyses and write each table to analysis_results/.

        Produces: overall totals/averages, per-UP-creator aggregates,
        a correlation matrix of engagement metrics, top-20 rankings by views
        and likes, and a printed plain-text summary report.

        Returns:
            dict of the intermediate DataFrames, or None when df is empty.
        """
        logger.info("=" * 50)
        logger.info("步驟 3: 開始數據分析")
        logger.info("=" * 50)

        if df is None or len(df) == 0:
            logger.error("無數據可分析")
            return None

        results = {}

        # --- Overall totals and averages -----------------------------------
        logger.info("執行基本統計分析...")
        basic_stats = {
            '總視頻數': len(df),
            '總播放量': df['view'].sum(),
            '總點贊數': df['like'].sum(),
            '總投幣數': df['coin'].sum(),
            '總收藏數': df['favorite'].sum(),
            '總分享數': df['share'].sum(),
            '平均播放量': df['view'].mean(),
            '平均點贊數': df['like'].mean(),
            '平均投幣數': df['coin'].mean(),
            '平均收藏數': df['favorite'].mean(),
            '平均分享數': df['share'].mean(),
        }
        stats_table = pd.DataFrame(list(basic_stats.items()), columns=['指標', '數值'])
        stats_table.to_csv(os.path.join('analysis_results', 'basic_statistics.csv'),
                           index=False, encoding='utf-8-sig')
        results['basic_stats'] = stats_table

        # --- Per-creator aggregation ----------------------------------------
        logger.info("執行UP主分析...")
        creator_table = (
            df.groupby('owner_name')
            .agg({'bvid': 'count', 'view': 'sum', 'like': 'sum',
                  'coin': 'sum', 'favorite': 'sum', 'interaction_rate': 'mean'})
            .reset_index()
        )
        creator_table.columns = ['UP主', '視頻數量', '總播放量', '總點贊數', '總投幣數', '總收藏數', '平均互動率']
        creator_table = creator_table.sort_values('總播放量', ascending=False)
        creator_table.to_csv(os.path.join('analysis_results', 'up_creator_analysis.csv'),
                             index=False, encoding='utf-8-sig')
        results['up_stats'] = creator_table

        # --- Pairwise correlations of the main engagement metrics -----------
        logger.info("執行相關性分析...")
        wanted = ['view', 'like', 'coin', 'favorite', 'share', 'interaction_rate']
        present = [col for col in wanted if col in df.columns]
        corr = df[present].corr()
        corr.to_csv(os.path.join('analysis_results', 'correlation_matrix.csv'),
                    encoding='utf-8-sig')
        results['correlation_matrix'] = corr

        # --- Top-20 rankings -------------------------------------------------
        logger.info("執行熱門視頻分析...")
        display_cols = ['title', 'owner_name', 'view', 'like', 'coin', 'favorite', 'interaction_rate']
        top_view = df.nlargest(20, 'view')[display_cols]
        top_like = df.nlargest(20, 'like')[display_cols]
        top_view.to_csv(os.path.join('analysis_results', 'top_20_by_view.csv'),
                        index=False, encoding='utf-8-sig')
        top_like.to_csv(os.path.join('analysis_results', 'top_20_by_like.csv'),
                        index=False, encoding='utf-8-sig')
        results['top_videos'] = {
            'by_view': top_view,
            'by_like': top_like
        }

        # --- Human-readable summary report -----------------------------------
        lines = [
            "=" * 50,
            "B站每周必看數據分析報告",
            "=" * 50,
            f"分析時間: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            f"數據總量: {len(df)} 個視頻",
            "",
            "【基本統計】",
            f"總播放量: {df['view'].sum():,}",
            f"總點贊數: {df['like'].sum():,}",
            f"總投幣數: {df['coin'].sum():,}",
            f"平均播放量: {df['view'].mean():,.0f}",
            "",
            "【UP主統計】",
            f"參與UP主數量: {df['owner_name'].nunique()}",
        ]
        best = df.groupby('owner_name')['view'].sum().sort_values(ascending=False).head(1)
        if not best.empty:
            lines.append(f"最受歡迎UP主: {best.index[0]} (播放量: {best.iloc[0]:,})")

        with open(os.path.join('analysis_results', 'analysis_summary_report.txt'),
                  'w', encoding='utf-8') as f:
            f.write('\n'.join(lines))

        print('\n'.join(lines))
        logger.info("數據分析完成！")

        return results
    
    def train_ml_model(self, df):
        """Train classifiers to predict whether a video is a top-10 hit.

        Trains a RandomForest and a LogisticRegression on engagement
        features, keeps whichever scores higher on the held-out split, and
        saves model + scaler + feature list to ml_models/.

        NOTE(review): the label is "top 10 by view count" while 'view' (and
        view-derived rates) are also used as features — the models can read
        the answer off the inputs, so the reported accuracy reflects label
        leakage rather than predictive power. Consider excluding 'view' from
        feature_columns.

        Returns:
            dict mapping model name -> {'model', 'accuracy', 'report'},
            or None on insufficient data / missing dependencies / failure.
        """
        logger.info("=" * 50)
        logger.info("步驟 4: 開始機器學習模型訓練")
        logger.info("=" * 50)
        
        if df is None or len(df) < 10:
            logger.error("數據量不足，無法訓練模型")
            return None
        
        try:
            # sklearn/joblib are optional; imported here so the rest of the
            # pipeline works without them (ImportError handled below).
            from sklearn.model_selection import train_test_split
            from sklearn.ensemble import RandomForestClassifier
            from sklearn.linear_model import LogisticRegression
            from sklearn.metrics import classification_report, accuracy_score
            from sklearn.preprocessing import StandardScaler
            import joblib
            
            # Build the target: the 10 most-viewed videos are labelled 1.
            df_sorted = df.sort_values('view', ascending=False)
            df_sorted['is_top_10'] = 0
            df_sorted.iloc[:10, df_sorted.columns.get_loc('is_top_10')] = 1
            
            # Candidate features; only those present in the frame are used.
            feature_columns = [
                'view', 'like', 'coin', 'favorite', 'share', 'danmaku', 'reply',
                'interaction_rate', 'like_rate', 'coin_rate', 'favorite_rate', 'share_rate',
                'danmaku_density', 'reply_density'
            ]
            
            available_features = [col for col in feature_columns if col in df_sorted.columns]
            
            # Impute remaining NaNs with the column mean.
            for col in available_features:
                df_sorted[col] = df_sorted[col].fillna(df_sorted[col].mean())
            
            X = df_sorted[available_features]
            y = df_sorted['is_top_10']
            
            # 80/20 split, stratified so both classes appear in the test set.
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=0.2, random_state=42, stratify=y
            )
            
            # Standardize features (fit on train only to avoid test leakage).
            scaler = StandardScaler()
            X_train_scaled = scaler.fit_transform(X_train)
            X_test_scaled = scaler.transform(X_test)
            
            # Train both candidate models.
            models = {
                'random_forest': RandomForestClassifier(n_estimators=100, random_state=42),
                'logistic_regression': LogisticRegression(random_state=42, max_iter=1000)
            }
            
            results = {}
            best_model = None
            best_accuracy = 0
            
            for name, model in models.items():
                logger.info(f"訓練 {name} 模型...")
                
                model.fit(X_train_scaled, y_train)
                y_pred = model.predict(X_test_scaled)
                accuracy = accuracy_score(y_test, y_pred)
                report = classification_report(y_test, y_pred)
                
                results[name] = {
                    'model': model,
                    'accuracy': accuracy,
                    'report': report
                }
                
                # Track the best model by plain accuracy on the test split.
                if accuracy > best_accuracy:
                    best_accuracy = accuracy
                    best_model = model
            
            # Persist the winning model, the fitted scaler, and the exact
            # feature order (needed to reproduce inputs at inference time).
            if best_model:
                model_file = os.path.join('ml_models', 'best_model.pkl')
                scaler_file = os.path.join('ml_models', 'scaler.pkl')
                features_file = os.path.join('ml_models', 'features.txt')
                
                joblib.dump(best_model, model_file)
                joblib.dump(scaler, scaler_file)
                
                with open(features_file, 'w', encoding='utf-8') as f:
                    f.write('\n'.join(available_features))
                
                logger.info(f"最佳模型已保存: {model_file}")
            
            # Write and print a plain-text training report.
            report = []
            report.append("=" * 50)
            report.append("機器學習模型訓練報告")
            report.append("=" * 50)
            report.append(f"訓練時間: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
            report.append("")
            
            for name, result in results.items():
                report.append(f"【{name}】")
                report.append(f"準確率: {result['accuracy']:.4f}")
                report.append("分類報告:")
                report.append(result['report'])
                report.append("")
            
            model_report_file = os.path.join('ml_models', 'model_training_report.txt')
            with open(model_report_file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(report))
            
            print('\n'.join(report))
            logger.info("機器學習模型訓練完成！")
            
            return results
            
        except ImportError as e:
            logger.error(f"缺少機器學習依賴包: {e}")
            logger.info("請安裝: pip install scikit-learn joblib")
            return None
        except Exception as e:
            logger.error(f"模型訓練失敗: {e}")
            return None
    
    def create_visualizations(self, df):
        """創建可視化圖表"""
        logger.info("=" * 50)
        logger.info("步驟 5: 開始生成可視化圖表")
        logger.info("=" * 50)
        
        if df is None or len(df) == 0:
            logger.error("無數據可視化")
            return None
        
        try:
            from pyecharts import options as opts
            from pyecharts.charts import Bar, Line, Pie, Scatter, HeatMap
            from pyecharts.globals import ThemeType
            
            # 播放量分布圖
            logger.info("創建播放量分布圖...")
            bins = [0, 10000, 50000, 100000, 500000, 1000000, float('inf')]
            labels = ['0-1萬', '1-5萬', '5-10萬', '10-50萬', '50-100萬', '100萬+']
            df['view_range'] = pd.cut(df['view'], bins=bins, labels=labels, right=False)
            view_counts = df['view_range'].value_counts().sort_index()
            
            bar = (
                Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))
                .add_xaxis(view_counts.index.tolist())
                .add_yaxis("視頻數量", view_counts.values.tolist())
                .set_global_opts(
                    title_opts=opts.TitleOpts(title="視頻播放量分布"),
                    xaxis_opts=opts.AxisOpts(name="播放量範圍"),
                    yaxis_opts=opts.AxisOpts(name="視頻數量"),
                    tooltip_opts=opts.TooltipOpts(trigger="axis")
                )
            )
            
            file_path = os.path.join('visualizations', "play_count_distribution.html")
            bar.render(file_path)
            logger.info(f"播放量分布圖已保存: {file_path}")
            
            # 熱門UP主圖
            logger.info("創建熱門UP主圖...")
            up_stats = df.groupby('owner_name').agg({
                'view': 'sum',
                'like': 'sum',
                'coin': 'sum'
            }).reset_index()
            
            top_ups = up_stats.nlargest(10, 'view')
            
            up_bar = (
                Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))
                .add_xaxis(top_ups['owner_name'].tolist())
                .add_yaxis("總播放量", top_ups['view'].tolist())
                .set_global_opts(
                    title_opts=opts.TitleOpts(title="熱門UP主播放量排行"),
                    xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=45)),
                    yaxis_opts=opts.AxisOpts(name="總播放量"),
                    tooltip_opts=opts.TooltipOpts(trigger="axis")
                )
            )
            
            up_file = os.path.join('visualizations', "top_up_creators.html")
            up_bar.render(up_file)
            logger.info(f"熱門UP主圖已保存: {up_file}")
            
            # 互動率分析圖
            logger.info("創建互動率分析圖...")
            interaction_data = {
                '點贊率': df['like_rate'].mean(),
                '投幣率': df['coin_rate'].mean(),
                '收藏率': df['favorite_rate'].mean(),
                '分享率': df['share_rate'].mean()
            }
            
            pie = (
                Pie(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))
                .add(
                    series_name="互動率",
                    data_pair=list(interaction_data.items()),
                    radius=["40%", "70%"]
                )
                .set_global_opts(
                    title_opts=opts.TitleOpts(title="平均互動率分布"),
                    legend_opts=opts.LegendOpts(orient="vertical", pos_top="15%", pos_left="2%")
                )
                .set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c}"))
            )
            
            pie_file = os.path.join('visualizations', "interaction_rates.html")
            pie.render(pie_file)
            logger.info(f"互動率分析圖已保存: {pie_file}")
            
            # 創建綜合儀表板
            dashboard_html = f"""
            <!DOCTYPE html>
            <html>
            <head>
                <meta charset="utf-8">
                <title>B站每周必看數據分析儀表板</title>
                <style>
                    body {{ font-family: Arial, sans-serif; margin: 20px; }}
                    .header {{ text-align: center; margin-bottom: 30px; }}
                    .chart-container {{ margin-bottom: 40px; }}
                    .chart-title {{ font-size: 18px; font-weight: bold; margin-bottom: 10px; }}
                    iframe {{ width: 100%; height: 500px; border: none; }}
                </style>
            </head>
            <body>
                <div class="header">
                    <h1>B站每周必看數據分析儀表板</h1>
                    <p>生成時間: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
                    <p>數據總量: {len(df)} 個視頻</p>
                </div>
                
                <div class="chart-container">
                    <div class="chart-title">播放量分布</div>
                    <iframe src="play_count_distribution.html"></iframe>
                </div>
                
                <div class="chart-container">
                    <div class="chart-title">熱門UP主排行</div>
                    <iframe src="top_up_creators.html"></iframe>
                </div>
                
                <div class="chart-container">
                    <div class="chart-title">互動率分析</div>
                    <iframe src="interaction_rates.html"></iframe>
                </div>
            </body>
            </html>
            """
            
            dashboard_file = os.path.join('visualizations', "dashboard.html")
            with open(dashboard_file, 'w', encoding='utf-8') as f:
                f.write(dashboard_html)
            
            logger.info(f"綜合儀表板已保存: {dashboard_file}")
            logger.info("可視化圖表生成完成！")
            
            return dashboard_file
            
        except ImportError as e:
            logger.error(f"缺少可視化依賴包: {e}")
            logger.info("請安裝: pip install pyecharts")
            return None
        except Exception as e:
            logger.error(f"可視化生成失敗: {e}")
            return None
    
    def visualize_relationships(self, df):
        """Generate relationship charts between video engagement metrics.

        Saves three PNG charts under ``visualizations/``:
        a correlation heatmap of the core metrics, a views-vs-likes
        scatter plot, and a views-vs-interaction-rate scatter plot.

        Args:
            df: DataFrame with at least the columns 'view', 'like',
                'coin', 'favorite', 'share', 'interaction_rate'.
        """
        # exist_ok avoids the check-then-create race of the old
        # os.path.exists() + os.makedirs() pair; plt/sns/os come from the
        # module-level imports, so no function-local re-imports are needed.
        os.makedirs('visualizations', exist_ok=True)

        # Correlation heatmap across all core engagement metrics.
        metric_cols = ['view', 'like', 'coin', 'favorite', 'share', 'interaction_rate']
        plt.figure(figsize=(10, 8))
        sns.heatmap(df[metric_cols].corr(), annot=True, cmap='coolwarm')
        plt.title('各指標相關係數熱力圖')
        plt.tight_layout()
        plt.savefig('visualizations/heatmap_corr.png')
        plt.close()

        # Scatter: views vs likes.
        plt.figure(figsize=(8, 6))
        sns.scatterplot(x='view', y='like', data=df, alpha=0.5)
        plt.xlabel('播放量')
        plt.ylabel('點贊數')
        plt.title('播放量 vs 點贊數')
        plt.tight_layout()
        plt.savefig('visualizations/view_vs_like.png')
        plt.close()

        # Scatter: views vs interaction rate.
        plt.figure(figsize=(8, 6))
        sns.scatterplot(x='view', y='interaction_rate', data=df, alpha=0.5)
        plt.xlabel('播放量')
        plt.ylabel('互動熱度')
        plt.title('播放量 vs 互動熱度')
        plt.tight_layout()
        plt.savefig('visualizations/view_vs_interaction.png')
        plt.close()

        logger.info("已生成關係可視化圖表，請查看 visualizations 目錄。")

    def classify_top10(self, df):
        """Classify whether a video can reach the top 10 of the hot list.

        Labels the 10 most-viewed videos as the positive class, trains a
        random forest on engagement features, and writes a classification
        report plus a feature-importance chart to ``visualizations/``.

        Args:
            df: DataFrame with columns 'view', 'like', 'coin', 'favorite',
                'share', 'interaction_rate'.
        """
        from sklearn.ensemble import RandomForestClassifier
        from sklearn.model_selection import train_test_split
        from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score

        # With 10 or fewer rows every sample would be labelled positive,
        # which makes the split degenerate and roc_auc_score raise.
        if len(df) <= 10:
            logger.warning("數據量不足（<=10），跳過熱搜榜前十分類分析。")
            return

        # exist_ok avoids the check-then-create race.
        os.makedirs('visualizations', exist_ok=True)

        # Mark the 10 most-viewed videos as the positive class.
        df = df.sort_values('view', ascending=False).copy()
        df['is_top10'] = 0
        df.iloc[:10, df.columns.get_loc('is_top10')] = 1

        features = ['view', 'like', 'coin', 'favorite', 'share', 'interaction_rate']
        X = df[features]
        y = df['is_top10']
        # Stratify so the rare positive class appears in both train and
        # test splits (there are only 10 positives in total).
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, stratify=y, test_size=0.2, random_state=42
        )

        clf = RandomForestClassifier(n_estimators=100, random_state=42)
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        y_prob = clf.predict_proba(X_test)[:, 1]

        # Persist the evaluation metrics.
        report = classification_report(y_test, y_pred, digits=4)
        cm = confusion_matrix(y_test, y_pred)
        auc = roc_auc_score(y_test, y_prob)
        with open('visualizations/top10_classification_report.txt', 'w', encoding='utf-8') as f:
            f.write("分類報告：\n" + report + "\n")
            f.write("混淆矩陣：\n" + str(cm) + "\n")
            f.write(f"AUC：{auc}\n")

        # Feature-importance bar chart.
        plt.figure(figsize=(8, 5))
        sns.barplot(x=clf.feature_importances_, y=features)
        plt.title('特徵重要性')
        plt.tight_layout()
        plt.savefig('visualizations/feature_importance.png')
        plt.close()

        logger.info("已完成熱搜榜前十分類分析，詳見 visualizations 目錄。")

    def run_complete_analysis(self):
        """Run the full pipeline: crawl, clean, visualize, classify, analyze, train, report."""
        print("B站每周必看數據分析完整程序")
        print("=" * 60)

        # Step 1: crawl the weekly-must-watch data.
        df = self.crawl_weekly_data()
        if df is None:
            logger.error("爬取數據失敗，程序終止")
            return

        # Step 2: clean the raw data.
        cleaned_df = self.clean_data(df)
        if cleaned_df is None:
            logger.error("數據清洗失敗，程序終止")
            return

        # Step 2.5: metric-relationship charts.
        self.visualize_relationships(cleaned_df)
        # Step 2.6: top-10 classification analysis.
        self.classify_top10(cleaned_df)

        # Step 3: statistical analysis.
        analysis_results = self.analyze_data(cleaned_df)
        if analysis_results is None:
            logger.error("數據分析失敗，程序終止")
            return

        # Step 4: machine learning (best effort — result may be None).
        ml_results = self.train_ml_model(cleaned_df)
        # Step 5: interactive visualizations (best effort — may be None).
        viz_file = self.create_visualizations(cleaned_df)

        # Final summary report.
        self.generate_final_report(cleaned_df, analysis_results, ml_results, viz_file)
        print("\n" + "=" * 60)
        print("🎉 分析完成！")
        print("📊 請打開 visualizations/dashboard.html 查看可視化結果")
        # The emoji here was mojibake ("\ufffd\ufffd") in the original;
        # restored to a folder icon.
        print("📁 所有結果文件已保存在相應目錄中")
    
    def generate_final_report(self, df, analysis_results, ml_results, viz_file):
        """Assemble the final project report, print it, and save it to disk.

        Args:
            df: cleaned video DataFrame; uses 'view', 'like', 'owner_name'.
            analysis_results: analysis-stage output (not inspected here).
            ml_results: mapping of model name -> {'accuracy': float}, or falsy.
            viz_file: path of the generated dashboard, or falsy if skipped.
        """
        logger.info("=" * 50)
        logger.info("生成最終分析報告")
        logger.info("=" * 50)

        divider = "=" * 60
        lines = [
            divider,
            "B站每周必看數據分析項目完成報告",
            divider,
            f"完成時間: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "",
        ]

        # Inventory each output directory and count its result files.
        directories = {
            '原始數據': 'data',
            '清洗後數據': 'cleaned_data',
            '分析結果': 'analysis_results',
            '機器學習模型': 'ml_models',
            '可視化圖表': 'visualizations'
        }
        for label, folder in directories.items():
            if not os.path.exists(folder):
                lines.append(f"{label}目錄 ({folder}): 不存在")
                continue
            count = sum(1 for entry in os.listdir(folder)
                        if entry.endswith(('.csv', '.txt', '.html', '.pkl')))
            lines.append(f"{label}目錄 ({folder}): {count} 個文件")

        lines += [
            "",
            "【數據統計】",
            f"總視頻數: {len(df)}",
            f"總播放量: {df['view'].sum():,}",
            f"總點贊數: {df['like'].sum():,}",
            f"參與UP主數: {df['owner_name'].nunique()}",
        ]

        if ml_results:
            # Report the model with the highest accuracy.
            best_model = max(ml_results.keys(), key=lambda m: ml_results[m]['accuracy'])
            best_accuracy = ml_results[best_model]['accuracy']
            lines.append(f"最佳模型: {best_model} (準確率: {best_accuracy:.4f})")

        lines += [
            "",
            "【使用說明】",
            "1. 查看原始數據: 打開 data 目錄中的CSV文件",
            "2. 查看清洗後數據: 打開 cleaned_data 目錄中的CSV文件",
            "3. 查看分析結果: 打開 analysis_results 目錄中的文件",
            "4. 查看機器學習模型: 打開 ml_models 目錄中的文件",
        ]
        if viz_file:
            lines.append(f"5. 查看可視化圖表: 打開 {viz_file}")

        lines += [
            "",
            "【項目功能】",
            "✓ 爬取B站每周必看視頻數據",
            "✓ 數據清洗和預處理",
            "✓ 基本統計分析和UP主分析",
            "✓ 相關性分析和熱門視頻分析",
        ]
        if ml_results:
            lines.append("✓ 機器學習模型訓練（熱搜預測）")
        if viz_file:
            lines.append("✓ 交互式可視化圖表")
        lines += ["", divider]

        # Persist the report next to the script, then echo it to stdout.
        report_file = "final_analysis_report.txt"
        with open(report_file, 'w', encoding='utf-8') as f:
            f.write('\n'.join(lines))

        print('\n'.join(lines))
        logger.info(f"最終報告已保存: {report_file}")

def main():
    """Entry point: verify core dependencies, then run the full analysis."""
    # Check dependencies BEFORE constructing the analyzer, so that a
    # missing package yields the friendly install hint rather than an
    # unhandled traceback from the constructor's requests.Session setup.
    try:
        import requests
        import pandas as pd
        import numpy as np
        print("✓ 基本依賴檢查通過")
    except ImportError as e:
        print(f"✗ 缺少依賴包: {e}")
        print("請運行: pip install requests pandas numpy")
        return

    # Run the complete analysis pipeline.
    analyzer = BilibiliWeeklyAnalyzer()
    analyzer.run_complete_analysis()

if __name__ == "__main__":
    main()