# 此为不用webdriver的爬虫版本，但是这个文件接近荒废，我实际中用的是webdriver
# python weibo_crawler.py -u 1642909335 [-c "Cookie String"] [-o output_directory] [-f storage_file_path]
import os
import re
import json
import argparse
import traceback
import logging
import requests
import pandas as pd
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
import random
import time
from datetime import datetime, timedelta

# Anchor text that marks a link to a post's picture page ("组图" = photo set,
# "图片" = picture); used when parsing a post to collect its image URLs.
img_pattern = re.compile('组图|图片')

# Module-wide logging: timestamped INFO-level messages to stderr.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def handle_time(publish_time_str):
    """Parse a weibo.cn timestamp fragment into a naive local datetime.

    Supported formats, as rendered by the mobile site:
      - "N分钟前"       -> now minus N minutes
      - "N秒前"         -> now minus N seconds
      - "今天 HH:MM"    -> today at HH:MM
      - "M月D日 HH:MM"  -> that month/day at HH:MM; weibo.cn omits the year,
        so if the resulting moment lies in the future it is rolled back to
        the previous year (fixes the year-boundary bug where a December
        post read in January was stamped with the current year).

    Args:
        publish_time_str: raw time fragment scraped from a post.

    Returns:
        datetime: the resolved publish time.

    Raises:
        IndexError: if the string matches none of the known formats.
    """
    now = datetime.now()
    if '分钟前' in publish_time_str:
        minutes_ago = int(re.findall(r'(\d+)分钟前', publish_time_str)[0])
        return now - timedelta(minutes=minutes_ago)
    if '秒前' in publish_time_str:
        seconds_ago = int(re.findall(r'(\d+)秒前', publish_time_str)[0])
        return now - timedelta(seconds=seconds_ago)
    if '今天' in publish_time_str:
        time_part = re.findall(r'今天 (\d+:\d+)', publish_time_str)[0]
        return datetime.strptime(f"{now.year}-{now.month}-{now.day} {time_part}", '%Y-%m-%d %H:%M')

    month, day, time_part = re.findall(r'(\d+)月(\d+)日 (\d+:\d+)', publish_time_str)[0]
    publish_time = datetime.strptime(f"{now.year}-{month}-{day} {time_part}", '%Y-%m-%d %H:%M')
    # A year-less date in the future cannot be this year; assume last year.
    if publish_time > now:
        publish_time = publish_time.replace(year=now.year - 1)
    return publish_time

class WeiboCrawler:
    """Crawler for a single user's profile and posts on the mobile site weibo.cn.

    Fetches the user's profile info and all (or the first N pages of) posts
    with plain HTTP requests, then saves the results as JSON files under
    ``output_dir``.
    """

    def __init__(self, user_id, cookie, output_dir='./output'):
        """
        Args:
            user_id: numeric weibo user id (int or str).
            cookie: raw Cookie header string for weibo.cn; when truthy it
                replaces the baked-in default cookie below.
            output_dir: directory where JSON results and HTML dumps go.
        """
        self.user_id = user_id
        # Reserved for a local crawl-record database; unused in this version.
        self.storage_path = 'weibo_record.db'
        self.output_dir = output_dir
        self.ua = UserAgent()
        # Crawl results.
        self.user_info = {}   # profile fields for the user
        self.weibo_data = []  # one dict per post (see _parse_weibo)
        # Request headers. The baked-in Cookie is a stale default; a
        # caller-supplied cookie (below) takes precedence.
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cookie": "SCF=Aj6Om1OXZlokGRLDEsWz_kKLqPnVFYUWhR_s8FVpaJtG7CvXgN92qI-TLK1mO27ON4a5igrbYATizoBugWbSj1Y.; SUB=_2A25FEXPVDeRhGeBL7lUV9C_JzjiIHXVmb4kdrDV6PUJbktAYLWaskW1NRu7oEkIZWq2bCuvMA4vpNTC1ezDnhT2f; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFVceO6z43BbvamNp5sASPg5JpX5KMhUgL.FoqfSKMXSh2fSKB2dJLoIX.LxKBLB.zLB.zLxK-L12qLBoqLxKBLBonL1h.LxKqLBozL1K5LxK.LBKeL12Hki--Ri-2pi-2fi--Ni-88iK.Ni--fi-82iK.7; ALF=1748799621",
            "Sec-Ch-Ua": '"Not)A;Brand";v="24", "Chromium";v="116"',
            "Sec-Ch-Ua-Mobile": "?0",
            "Sec-Ch-Ua-Platform": '"Windows"',
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "none",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.5845.97 Safari/537.36 Core/1.116.498.400 QQBrowser/13.7.6352.400"
        }
        # BUGFIX: honor the caller-supplied cookie. Previously this line was
        # commented out, so the -c/-f CLI options were silently ignored.
        if cookie:
            self.headers['Cookie'] = cookie

    def get_user_info(self):
        """Fetch the user's basic profile into ``self.user_info``.

        Requests /<uid>/info for the text fields and /<uid>/profile for the
        post/follow/follower counts; also dumps the profile page HTML once
        into ``output_dir`` for debugging.

        Returns:
            bool: True if the info page was fetched, False otherwise.
        """
        url = f'https://weibo.cn/{self.user_id}/info'
        response = self._make_request(url)

        if not response:
            logging.error(f"获取用户信息失败:{self.user_id}")
            return False

        html = BeautifulSoup(response.text, 'lxml')
        user_info_text = html.find_all('div', class_='c')

        # Defaults used when a field cannot be parsed from the page.
        self.user_info = {
            'user_id': self.user_id,
            'nickname': '未知',
            'gender': '未知',
            'location': '未知',
            'birthday': '未知',
            'description': '未知',
            'verified_reason': '未知',
            'weibo_count': 0,
            'following_count': 0,
            'follower_count': 0
        }

        if len(user_info_text) > 1:
            info_text = user_info_text[1].get_text()
            # Append '\n' so the non-greedy match also terminates on the last field.
            nickname = re.findall('昵称:(.*?)\n', info_text + '\n')
            gender = re.findall('性别:(.*?)\n', info_text + '\n')
            location = re.findall('地区:(.*?)\n', info_text + '\n')
            description = re.findall('简介:(.*?)\n', info_text + '\n')
            self.user_info['nickname'] = nickname[0] if nickname else '未知'
            self.user_info['gender'] = gender[0] if gender else '未知'
            self.user_info['location'] = location[0] if location else '未知'
            self.user_info['description'] = description[0] if description else '未知'

        # Fetch post / following / follower counts.
        # BUGFIX: this assignment used to sit inside the `if` above, so a
        # profile without an info block re-requested the /info page here.
        url = f'https://weibo.cn/{self.user_id}/profile?display=0&retcode=6102'
        response = self._make_request(url)

        if response:
            # Save the raw profile HTML once for debugging.
            # BUGFIX: make sure the output directory exists before writing.
            os.makedirs(self.output_dir, exist_ok=True)
            file = os.path.join(self.output_dir, f'{self.user_id}_page.html')
            if not os.path.exists(file):
                with open(file, 'w', encoding='utf-8') as f:
                    f.write(response.text)
                logging.info(f"当前页面HTML已保存到 {file}")

            html = BeautifulSoup(response.text, 'lxml')
            # BUGFIX: guard against a missing stats block (was an unchecked
            # .get_text() on the result of find(), which can be None).
            tip2 = html.find('div', class_='tip2')
            if tip2:
                profile_text = tip2.get_text()
                weibo = re.findall(r'微博\[(\d+)\]', profile_text)
                following = re.findall(r'关注\[(\d+)\]', profile_text)
                follower = re.findall(r'粉丝\[(\d+)\]', profile_text)

                self.user_info['weibo_count'] = int(weibo[0]) if weibo else 0
                self.user_info['following_count'] = int(following[0]) if following else 0
                self.user_info['follower_count'] = int(follower[0]) if follower else 0

        logging.info(f"用户信息获取成功: {self.user_info['nickname']}")
        return True

    def get_weibo_data(self, max_pages=None):
        """Crawl the user's posts page by page into ``self.weibo_data``.

        Args:
            max_pages: maximum number of pages to crawl; None means all pages.
        """
        page = 1
        while True:
            url = f'https://weibo.cn/{self.user_id}?page={page}'
            response = self._make_request(url)

            if not response:
                logging.error(f"获取第{page}页微博失败")
                break

            html = BeautifulSoup(response.text, 'lxml')
            # Each post lives in <div class="c" id="M_<weibo_id>">.
            weibo_divs = html.find_all('div', class_='c', id=lambda x: x and x.startswith('M_'))

            if not weibo_divs:
                logging.info("已到达最后一页或无法获取微博内容")
                break

            for div in weibo_divs:
                weibo = self._parse_weibo(div)
                if weibo:
                    self.weibo_data.append(weibo)

            if max_pages and page >= max_pages:
                logging.info(f"已达到最大页码{page}页，已退出爬取")
                break

            # Stop when the pager has no "下页" (next page) link.
            pagelist = html.find('div', class_='pa', id='pagelist')
            if pagelist:
                if not pagelist.find('a', text='下页'):
                    logging.info(f"已到达最后一页，已退出爬取")
                    break
            else:
                logging.info(f"未找到分页信息，已经达到最后一页，已退出爬取")
                break

            page += 1
            logging.info(f"正在爬取第{page}页")
            time.sleep(random.uniform(1, 3))  # random 1-3s pause to avoid rate limiting

        logging.info(f"共获取到{len(self.weibo_data)} 条微博")

    def _parse_weibo(self, div):
        """Parse one post element into a dict.

        Args:
            div: BeautifulSoup div element with id ``M_<weibo_id>``.

        Returns:
            dict with keys id / content / publish_time / source /
            repost_count / comment_count / like_count / images,
            or None if parsing failed.
        """
        try:
            weibo_id = div['id'].split('_')[1]
            ctt = div.find('span', class_='ctt')
            content = ctt.get_text().strip() if ctt else ''

            # If the post is truncated, follow the "全文" (full text) link.
            full_text_link = div.find('a', text='全文')
            if full_text_link:
                full_text_url = f"https://weibo.cn{full_text_link['href']}"
                full_text_response = self._make_request(full_text_url)
                if full_text_response:
                    full_text_html = BeautifulSoup(full_text_response.text, 'lxml')
                    full_ctt = full_text_html.find('span', class_='ctt')
                    if full_ctt:
                        content = full_ctt.get_text().strip()

            # Publish time and client source live in the last inner div;
            # the timestamp sits within the final 15 characters.
            info = div.find_all('div')[-1].get_text()
            publish_time = handle_time(info[-15:])

            source = None
            source_match = re.findall('来自(.*)', info)
            if source_match:
                source = source_match[0].strip()

            # Repost / comment / like counters, e.g. "转发[12]".
            repost = re.findall(r'转发\[(\d+)\]', info)
            repost_count = int(repost[0]) if repost else 0
            comment = re.findall(r'评论\[(\d+)\]', info)
            comment_count = int(comment[0]) if comment else 0
            like = re.findall(r'赞\[(\d+)\]', info)
            like_count = int(like[0]) if like else 0

            # Collect image URLs from the post's picture page, if linked.
            images = []
            imgs = div.find('a', text=img_pattern)
            if imgs:
                img_link = imgs['href']
                img_response = self._make_request(f"https://weibo.cn{img_link}")
                if img_response:
                    img_html = BeautifulSoup(img_response.text, 'lxml')
                    img_tags = img_html.find_all('img', attrs={'alt': 'pic'})
                    for img in img_tags:
                        if 'src' in img.attrs:
                            images.append(img['src'])
        except Exception as e:
            logging.error(f"解析微博数据失败:{e}")
            traceback.print_exc()
            return None

        return {
            'id': weibo_id,
            'content': content,
            'publish_time': publish_time,  # datetime; serialized in save_results
            'source': source,
            'repost_count': repost_count,
            'comment_count': comment_count,
            'like_count': like_count,
            'images': images
        }

    @staticmethod
    def _parse_cookies(cookie_str):
        """Convert a raw Cookie header string into a name->value dict.

        Malformed fragments (no "=") are skipped instead of raising, and
        both "; " and ";" separators are accepted.

        Args:
            cookie_str: e.g. ``"a=1; b=2"``; falsy values yield ``{}``.

        Returns:
            dict mapping cookie names to values.
        """
        if not cookie_str:
            return {}
        cookies_dict = {}
        for item in cookie_str.split(';'):
            item = item.strip()
            if not item or '=' not in item:
                continue  # BUGFIX: was an unconditional split that raised ValueError
            key, value = item.split('=', 1)
            cookies_dict[key] = value
        return cookies_dict

    def _make_request(self, url, max_retries=3, timeout=10):
        """GET ``url`` with the crawler's headers, retrying transient failures.

        Args:
            url: absolute URL to fetch.
            max_retries: attempts before giving up.
            timeout: per-request timeout in seconds.

        Returns:
            requests.Response on HTTP 200; None on 403/404 (treated as
            permanent, not retried) or after exhausting all retries.
        """
        retries = 0
        while retries < max_retries:
            try:
                response = requests.get(url, headers=self.headers, timeout=timeout)
                if response.status_code == 200:
                    return response
                elif response.status_code == 403:
                    logging.error(f"请求被拒绝访问，可能是cookies已经失效：URL{url},  用户ID:{self.user_id}")
                    return None  # was False; unified with the retry-exhausted path
                elif response.status_code == 404:
                    logging.error(f"用户不存在:{self.user_id}")
                    return None  # was False; unified with the retry-exhausted path
                else:
                    logging.error(f"请求失败，状态码:{response.status_code}，URL:{url}")
            except Exception as e:
                logging.error(f"请求失败，错误信息:{e},URL:{url}")
                traceback.print_exc()

            retries += 1
            logging.info(f"请求失败，正在重试({retries}/{max_retries})...")
            time.sleep(random.uniform(1, 3))

        logging.error(f"请求失败，已放弃:{url}")
        return None

    def save_results(self):
        """Write user info and post data as JSON files under ``output_dir``."""
        os.makedirs(self.output_dir, exist_ok=True)

        user_info_path = os.path.join(self.output_dir, f'{self.user_id}_user_info.json')
        with open(user_info_path, 'w', encoding='utf-8') as f:
            json.dump(self.user_info, f, ensure_ascii=False, indent=4)
        logging.info(f"用户信息已保存到 {user_info_path}")

        weibo_data_path = os.path.join(self.output_dir, f'{self.user_id}_weibo_data.json')
        with open(weibo_data_path, 'w', encoding='utf-8') as f:
            # BUGFIX: publish_time is a datetime, which json cannot serialize;
            # default=str renders it as "YYYY-MM-DD HH:MM:SS".
            json.dump(self.weibo_data, f, ensure_ascii=False, indent=4, default=str)
        logging.info(f"微博数据已保存到 {weibo_data_path}")


def main():
    """Command-line entry point: parse options, crawl one user, save output."""
    parser = argparse.ArgumentParser(description='微博爬虫 - 爬取指定用户的微博内容')
    parser.add_argument('-u', '--user_id', required=True, help='用户ID')
    parser.add_argument('-c', '--cookie', help='微博Cookie字符串，用于登录状态')
    parser.add_argument('-o', '--output_dir', default='./output', help='输出目录，默认为./output')
    parser.add_argument('-p', '--max_pages', type=int, help='最大爬取页数，默认爬取所有页')
    parser.add_argument('-f', '--cookie_file', help='Cookie文件路径，文件内直接粘贴Cookie字符串')
    opts = parser.parse_args()

    # Resolve the cookie: an explicit -c string takes precedence over -f.
    cookie_value = opts.cookie
    if not cookie_value and opts.cookie_file and os.path.exists(opts.cookie_file):
        with open(opts.cookie_file, 'r', encoding='utf-8') as fh:
            cookie_value = fh.read().strip()

    crawler = WeiboCrawler(opts.user_id, cookie=cookie_value, output_dir=opts.output_dir)

    # Only crawl posts and save when the profile fetch succeeded.
    if not crawler.get_user_info():
        return
    crawler.get_weibo_data(max_pages=opts.max_pages)
    crawler.save_results()

def run():
    """Ad-hoc entry point with hard-coded credentials, used for manual runs."""
    target_user = 6556240361
    session_cookie = '''SINAGLOBAL=9586039181107.584.1700131050731; ULV=1741919978564:63:1:1:9782236051301.93.1741919978481:1736127029099; XSRF-TOKEN=kKHgXNxMDjb9HlYF8nAa_FjX; ALF=1748799619; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFVceO6z43BbvamNp5sASPg5JpX5KMhUgL.FoqfSKMXSh2fSKB2dJLoIX.LxKBLB.zLB.zLxK-L12qLBoqLxKBLBonL1h.LxKqLBozL1K5LxK.LBKeL12Hki--Ri-2pi-2fi--Ni-88iK.Ni--fi-82iK.7'''
    crawler = WeiboCrawler(target_user, cookie=session_cookie, output_dir='output')

    # Crawl posts and persist results only after the profile fetch succeeds.
    if not crawler.get_user_info():
        return
    crawler.get_weibo_data(max_pages=None)
    crawler.save_results()




# Entry point: run() crawls with the hard-coded credentials above; switch to
# main() for the CLI usage shown in the header comment.
if __name__ == '__main__':
    # main()
    run()
