"""
B站视频信息爬虫
支持爬取UP主视频列表、视频详情、评论等信息
"""

import requests
import json
import time
import random
import os
from datetime import datetime
from urllib.parse import urlparse, parse_qs
import re


class BilibiliCrawler:
    """Bilibili crawler.

    Fetches an uploader's video list, per-video details, comments, and
    basic profile information through Bilibili's public web APIs, and can
    save the results to JSON files under ``bilibili_data/``.
    """

    def __init__(self):
        self.session = requests.Session()
        # Browser-like headers so the API is less likely to reject us as a bot.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Referer': 'https://www.bilibili.com/',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
        })

    def get_user_videos(self, uid, page=1, page_size=30):
        """
        Fetch one page of an uploader's video list.

        Args:
            uid (str): Uploader's user ID (mid).
            page (int): Page number, starting at 1.
            page_size (int): Videos per page; capped at 50 by the API.

        Returns:
            dict: Full API response on success, or None on failure.
        """
        # NOTE(review): this is the "wbi" endpoint, which nowadays expects
        # signed w_rid/wts parameters; unsigned requests may be rejected
        # with a non-zero code — confirm against the current API behavior.
        url = "https://api.bilibili.com/x/space/wbi/arc/search"

        params = {
            'mid': uid,
            'pn': page,
            'ps': min(page_size, 50),  # API maximum is 50 per page
            'index': 1,
            'jsonp': 'jsonp'
        }

        try:
            print(f"🔍 正在获取UP主 {uid} 第{page}页视频列表...")
            response = self.session.get(url, params=params, timeout=10)
            response.raise_for_status()

            data = response.json()
            # code == 0 is Bilibili's success marker; anything else carries
            # an error message in 'message'.
            if data.get('code') == 0:
                print(f"✅ 成功获取 {len(data['data']['list']['vlist'])} 个视频")
                return data
            else:
                print(f"❌ 获取失败: {data.get('message', '未知错误')}")
                return None

        except Exception as e:
            print(f"❌ 请求异常: {e}")
            return None

    def get_video_info(self, bv_id):
        """
        Fetch detailed information for a single video.

        Args:
            bv_id (str): The video's BV id.

        Returns:
            dict: The 'data' payload of the API response, or None on failure.
        """
        url = "https://api.bilibili.com/x/web-interface/view"

        params = {
            'bvid': bv_id
        }

        try:
            print(f"📹 正在获取视频 {bv_id} 详细信息...")
            response = self.session.get(url, params=params, timeout=10)
            response.raise_for_status()

            data = response.json()
            if data.get('code') == 0:
                print(f"✅ 成功获取视频信息: {data['data']['title']}")
                return data['data']
            else:
                print(f"❌ 获取失败: {data.get('message', '未知错误')}")
                return None

        except Exception as e:
            print(f"❌ 请求异常: {e}")
            return None

    def get_video_comments(self, aid, page=1, page_size=20):
        """
        Fetch one page of comments for a video.

        Args:
            aid (str): The video's av (numeric) id.
            page (int): Page number.
            page_size (int): Comments per page (currently unused by the
                request parameters; kept for interface compatibility).

        Returns:
            dict: The 'data' payload of the API response, or None on failure.
        """
        url = "https://api.bilibili.com/x/v2/reply/main"

        params = {
            'oid': aid,
            'type': 1,  # resource type 1 = video
            'mode': 3,  # sort mode: 3 = hot, 2 = newest
            # The API expects 'offset' to itself be a JSON *string*, hence
            # the deliberate double json.dumps here.
            'pagination_str': json.dumps({
                'offset': json.dumps({'type': 1, 'direction': 1, 'data': {'pn': page}})
            }),
            'plat': 1,
            'seek_rpid': '',
            'web_location': 1315875
        }

        try:
            print(f"💬 正在获取视频 av{aid} 第{page}页评论...")
            response = self.session.get(url, params=params, timeout=10)
            response.raise_for_status()

            data = response.json()
            if data.get('code') == 0:
                # 'replies' is null (not []) when there are no comments.
                comments = data['data']['replies'] or []
                print(f"✅ 成功获取 {len(comments)} 条评论")
                return data['data']
            else:
                print(f"❌ 获取评论失败: {data.get('message', '未知错误')}")
                return None

        except Exception as e:
            print(f"❌ 请求异常: {e}")
            return None

    def get_user_info(self, uid):
        """
        Fetch a user's basic profile information.

        Args:
            uid (str): User ID (mid).

        Returns:
            dict: The 'data' payload of the API response, or None on failure.
        """
        url = "https://api.bilibili.com/x/space/acc/info"

        params = {
            'mid': uid
        }

        try:
            print(f"👤 正在获取用户 {uid} 基本信息...")
            response = self.session.get(url, params=params, timeout=10)
            response.raise_for_status()

            data = response.json()
            if data.get('code') == 0:
                user_info = data['data']
                print(f"✅ 用户信息: {user_info['name']} (粉丝数: {user_info['follower']})")
                return user_info
            else:
                print(f"❌ 获取失败: {data.get('message', '未知错误')}")
                return None

        except Exception as e:
            print(f"❌ 请求异常: {e}")
            return None

    def extract_uid_from_url(self, url):
        """
        Extract the UID from a Bilibili user-space URL.

        Args:
            url (str): User-space URL, or a bare numeric UID string.

        Returns:
            str: The UID, or None if nothing could be extracted.
        """
        # Several accepted URL shapes, tried in order.
        patterns = [
            r'space\.bilibili\.com/(\d+)',  # https://space.bilibili.com/123456
            r'bilibili\.com/(\d+)',         # https://www.bilibili.com/123456
            r'mid=(\d+)',                   # any URL carrying a mid= parameter
        ]

        for pattern in patterns:
            match = re.search(pattern, url)
            if match:
                return match.group(1)

        # A bare numeric string is treated as the UID itself.
        if url.isdigit():
            return url

        return None

    def save_to_json(self, data, filename):
        """
        Save data to a JSON file under the ``bilibili_data`` directory.

        Args:
            data: JSON-serializable data to save.
            filename (str): Target file name (directory is added here).
        """
        try:
            save_dir = "bilibili_data"
            # exist_ok=True avoids the check-then-create race of the previous
            # os.path.exists() + os.makedirs() pair.
            os.makedirs(save_dir, exist_ok=True)

            filepath = os.path.join(save_dir, filename)

            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)

            print(f"💾 数据已保存到: {filepath}")

        except Exception as e:
            print(f"❌ 保存失败: {e}")

    def crawl_user_all_videos(self, uid, max_pages=None):
        """
        Crawl every video of a user, page by page, and save the result.

        Args:
            uid (str): User ID (mid).
            max_pages (int): Page limit; None (or 0/falsy) means no limit.

        Returns:
            list: All collected video entries (possibly empty).
        """
        print(f"🚀 开始爬取用户 {uid} 的所有视频...")

        # Profile info is required up front — it names the output file.
        user_info = self.get_user_info(uid)
        if not user_info:
            print("❌ 无法获取用户信息，爬取终止")
            return []

        all_videos = []
        page = 1

        while True:
            if max_pages and page > max_pages:
                print(f"⚠️ 达到最大页数限制: {max_pages}")
                break

            result = self.get_user_videos(uid, page)
            if not result:
                print("❌ 获取视频列表失败")
                break

            videos = result['data']['list']['vlist']
            if not videos:
                print("✅ 已获取所有视频")
                break

            all_videos.extend(videos)

            # Stop once the last page has been fetched. Total pages is
            # ceil(count / ps); the previous floor-division formula
            # (count // ps + 1) overshot by one page whenever count was an
            # exact multiple of ps, costing a needless empty request.
            page_info = result['data']['page']
            total_pages = (page_info['count'] + page_info['ps'] - 1) // page_info['ps']
            if page >= total_pages:
                print("✅ 已获取所有视频")
                break

            page += 1

            # Randomized delay to avoid hammering the API.
            time.sleep(random.uniform(1, 3))

        print(f"🎉 爬取完成！共获取 {len(all_videos)} 个视频")

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        # NOTE(review): user names may contain characters that are invalid in
        # file names on some platforms — confirm whether sanitizing is needed.
        filename = f"{user_info['name']}_videos_{timestamp}.json"

        save_data = {
            'user_info': user_info,
            'videos': all_videos,
            'crawl_time': datetime.now().isoformat(),
            'total_count': len(all_videos)
        }

        self.save_to_json(save_data, filename)

        return all_videos

    def analyze_video_data(self, videos):
        """
        Compute summary statistics over a list of video entries.

        Args:
            videos (list): Video entries as returned by get_user_videos.

        Returns:
            dict: Totals, averages, and highlight videos; {} for empty input.
        """
        if not videos:
            return {}

        total_views = sum(v.get('play', 0) for v in videos)
        total_comments = sum(v.get('comment', 0) for v in videos)
        # NOTE(review): in this API 'video_review' is the danmaku
        # (bullet-chat) count, not likes — the 'likes' labels below may be
        # misleading; keys are kept unchanged for caller compatibility.
        total_likes = sum(v.get('video_review', 0) for v in videos)

        # Newest first, by publish timestamp.
        sorted_videos = sorted(videos, key=lambda x: x.get('created', 0), reverse=True)

        # Highest view count.
        most_viewed = max(videos, key=lambda x: x.get('play', 0))

        # Highest 'video_review' count (see NOTE above).
        most_liked = max(videos, key=lambda x: x.get('video_review', 0))

        analysis = {
            'total_videos': len(videos),
            'total_views': total_views,
            'total_comments': total_comments,
            'total_likes': total_likes,
            'avg_views': total_views // len(videos) if videos else 0,
            'avg_comments': total_comments // len(videos) if videos else 0,
            'avg_likes': total_likes // len(videos) if videos else 0,
            'latest_video': {
                'title': sorted_videos[0].get('title', ''),
                'bvid': sorted_videos[0].get('bvid', ''),
                'play': sorted_videos[0].get('play', 0)
            } if sorted_videos else None,
            'most_viewed_video': {
                'title': most_viewed.get('title', ''),
                'bvid': most_viewed.get('bvid', ''),
                'play': most_viewed.get('play', 0)
            },
            'most_liked_video': {
                'title': most_liked.get('title', ''),
                'bvid': most_liked.get('bvid', ''),
                'likes': most_liked.get('video_review', 0)
            }
        }

        return analysis


def main():
    """Interactive CLI: prompt for a UID/URL, then run the chosen action.

    Loops until the user enters 'q'. Each iteration extracts a UID from the
    input, shows a menu, and dispatches to the crawler. Errors in a single
    action are reported and the loop continues.
    """
    print("🚀 B站视频信息爬虫启动")
    print("=" * 50)

    crawler = BilibiliCrawler()

    while True:
        user_input = input("\n请输入B站用户主页URL或UID (输入 'q' 退出): ").strip()

        if user_input.lower() == 'q':
            print("👋 再见！")
            break

        uid = crawler.extract_uid_from_url(user_input)
        if not uid:
            print("❌ 无法从输入中提取有效的UID，请检查输入格式")
            continue

        print(f"✅ 提取到UID: {uid}")

        try:
            print("\n请选择操作:")
            print("1. 爬取所有视频信息")
            print("2. 爬取指定页数的视频")
            print("3. 获取用户基本信息")
            print("4. 获取最新视频详情")

            choice = input("请输入选项 (1-4): ").strip()

            if choice == '1':
                # Crawl everything, then print a summary report.
                videos = crawler.crawl_user_all_videos(uid)

                if videos:
                    analysis = crawler.analyze_video_data(videos)
                    print("\n📊 数据分析报告:")
                    print(f"   总视频数: {analysis['total_videos']}")
                    print(f"   总播放量: {analysis['total_views']:,}")
                    print(f"   总评论数: {analysis['total_comments']:,}")
                    print(f"   总点赞数: {analysis['total_likes']:,}")
                    print(f"   平均播放量: {analysis['avg_views']:,}")

                    if analysis.get('most_viewed_video'):
                        print(f"   最高播放量视频: {analysis['most_viewed_video']['title']} ({analysis['most_viewed_video']['play']:,} 播放)")

            elif choice == '2':
                # Crawl a limited number of pages.
                try:
                    max_pages = int(input("请输入要爬取的页数: "))
                except ValueError:
                    print("❌ 请输入有效的页数")
                    continue
                # Reject non-positive counts: 0 is falsy in the crawler's
                # max_pages guard and previously meant "crawl everything",
                # the opposite of what the user asked for.
                if max_pages < 1:
                    print("❌ 请输入有效的页数")
                    continue
                videos = crawler.crawl_user_all_videos(uid, max_pages)

            elif choice == '3':
                # Show the user's profile only.
                user_info = crawler.get_user_info(uid)
                if user_info:
                    print(f"\n👤 用户信息:")
                    print(f"   昵称: {user_info['name']}")
                    print(f"   简介: {user_info.get('sign', '暂无简介')}")
                    print(f"   粉丝数: {user_info['follower']:,}")
                    print(f"   关注数: {user_info['following']:,}")
                    print(f"   等级: Lv{user_info['level']}")

            elif choice == '4':
                # Fetch page 1 with size 1 to get only the newest video.
                videos_result = crawler.get_user_videos(uid, 1, 1)
                if videos_result and videos_result['data']['list']['vlist']:
                    latest_video = videos_result['data']['list']['vlist'][0]
                    bv_id = latest_video['bvid']

                    video_info = crawler.get_video_info(bv_id)
                    if video_info:
                        print(f"\n📹 最新视频详情:")
                        print(f"   标题: {video_info['title']}")
                        print(f"   简介: {video_info['desc'][:100]}...")
                        print(f"   播放量: {video_info['stat']['view']:,}")
                        print(f"   点赞数: {video_info['stat']['like']:,}")
                        print(f"   评论数: {video_info['stat']['reply']:,}")
                        print(f"   发布时间: {datetime.fromtimestamp(video_info['pubdate'])}")

            else:
                print("❌ 无效选项")
                continue

        except KeyboardInterrupt:
            print("\n⚠️ 操作被用户中断")
            continue
        except Exception as e:
            print(f"❌ 操作失败: {e}")
            continue

        # Brief pause between iterations.
        time.sleep(1)


if __name__ == "__main__":
    main() 