import requests
import openpyxl
from openpyxl import load_workbook
from bs4 import BeautifulSoup
import time
from typing import Optional
import logging

# Logging configuration — console (stream) output only, no log file.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler()],
)
logger = logging.getLogger(__name__)


class WeiboViewsUpdater:
    """Fetch Weibo article view counts and write them into an Excel workbook.

    Uses an authenticated ``requests.Session`` (cookie-based login) to scrape
    each article page, then records the parsed view count in a result column
    next to the URL column of the worksheet.

    Can be used as a context manager to ensure the HTTP session is closed:
        with WeiboViewsUpdater() as updater:
            updater.process_excel_file("book.xlsx")
    """

    # Default desktop browser User-Agent sent with every request.
    DEFAULT_USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36 Edg/140.0.0.0"

    # SECURITY NOTE(review): a live session cookie is hardcoded below. It grants
    # access to the associated Weibo account, will expire, and should not be
    # committed to source control. Prefer passing ``cookie=`` to the constructor
    # (e.g. loaded from an environment variable) instead of editing this value.
    # 使用时进行替换，先登录，找到Cookie (replace after logging in and copying the cookie).
    DEFAULT_COOKIE = "SINAGLOBAL=685256167607.8839.1755244227678; SCF=AmzweZhDLJwixFb0uowQdu3FbkLojkq4YU5f0mPjU3kywAIf2ccIyoP01TPkn_ohyqixoxJs2k1P6mdBdMobAhQ.; UOR=history.ifeng.com,widget.weibo.com,onebox.huawei.com; PC_TOKEN=5e16a319cf; WBStorage=aae38921|undefined; _s_tentry=-; Apache=8764476725296.186.1757946469199; ULV=1757946469200:6:4:1:8764476725296.186.1757946469199:1757317024331; XSRF-TOKEN=fvyMLg3zzLRytgX4iucrMdWG; ALF=1760538473; SUB=_2A25FzFI5DeRhGeVH6VoT-CnMyTiIHXVmoOvxrDV8PUJbkNAbLXX6kW1NT0roxIwTlMMRezHWw9pr6YmFzi7rzZyT; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhUmjESqsS..GlIwNV64gDw5JpX5KMhUgL.Foe4eonE1hM7eoB2dJLoI05LxKBLBonL1h5LxK-L12qLB-2LxK-LBKBLBKMLxKnL1--L1heLxKnL1-qLBoLix2Yt; WBPSESS=kWuRyR4YyQb3Gw8UcaMmppXHort2sr8dW9ii2bx1R3t3aH_AiVGvhF0QcvShzYCD9kWGHrSYavryAQ05DKw2b1AyM4I57oi1Pa4gfIg-7PYzyFiGtaNiy_6rF6WMmbL8FAdda2kBuQ1isicphWJT8w=="

    def __init__(
        self,
        cookie: Optional[str] = None,
        user_agent: Optional[str] = None,
    ):
        """Create an authenticated HTTP session.

        Args:
            cookie: Weibo login cookie string; falls back to the hardcoded
                default when omitted (backward compatible with the old
                zero-argument constructor).
            user_agent: Browser User-Agent header; defaults to a desktop
                Edge UA.
        """
        self.headers = {
            "User-Agent": user_agent or self.DEFAULT_USER_AGENT,
            "Cookie": cookie or self.DEFAULT_COOKIE,
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)

    def close(self) -> None:
        """Release the underlying HTTP session's connection pool."""
        self.session.close()

    def __enter__(self) -> "WeiboViewsUpdater":
        return self

    def __exit__(self, exc_type, exc, tb) -> None:
        self.close()

    def get_weibo_article_views(self, url: str) -> str:
        """Fetch the view count ("阅读数") of a single Weibo article page.

        Args:
            url: Weibo article URL.

        Returns:
            The view count as a string (e.g. "12345" — possibly a Chinese
            abbreviated form such as "1.2万"), the literal string "None" when
            the page has no recognizable counter, or a Chinese-language error
            message ("请求错误: ..." / "解析错误: ...") on failure. This
            method never raises.
        """
        try:
            logger.info("正在获取URL的阅读数: %s", url)

            response = self.session.get(url, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, "html.parser")

            # The counter is rendered as <span class="num">阅读数：N</span>;
            # only the first matching span on the page is inspected.
            read_count_element = soup.find("span", class_="num")
            if read_count_element is None:
                logger.warning("未找到阅读数元素")
                return "None"

            read_count_text = read_count_element.get_text(strip=True)
            if "阅读数：" not in read_count_text:
                logger.warning("阅读数格式异常: %s", read_count_text)
                return "None"

            # Keep everything after the full-width colon:
            # "阅读数：1.2万" -> "1.2万".
            read_count = read_count_text.split("：")[-1]
            logger.info("成功获取阅读数: %s", read_count)
            return read_count

        except requests.exceptions.RequestException as e:
            logger.error("网络请求错误: %s", e)
            return f"请求错误: {e}"
        except Exception as e:
            logger.error("解析错误: %s", e)
            return f"解析错误: {e}"

    def process_excel_file(
        self,
        file_path: str,
        sheet_name: Optional[str] = None,
        url_column: str = "H",
        result_column: str = "I",
    ) -> None:
        """Fill the result column with view counts for each Weibo URL.

        The workbook is modified in place: for every data row whose URL cell
        contains an http(s) weibo.com link, the scraped view count (or error
        string) is written into ``result_column`` of the same row.

        Args:
            file_path: Path of the .xlsx workbook to update.
            sheet_name: Worksheet name; the active sheet is used when None.
            url_column: Letter of the column holding article URLs.
            result_column: Letter of the column receiving view counts.

        Raises:
            Exception: re-raised after logging (missing file, bad sheet name,
                workbook locked by another program, ...).
        """
        try:
            logger.info("开始处理文件: %s", file_path)

            wb = load_workbook(file_path)
            ws = wb[sheet_name] if sheet_name else wb.active

            # Convert column letters ("H") to 1-based indices.
            url_col_idx = openpyxl.utils.column_index_from_string(url_column)
            result_col_idx = openpyxl.utils.column_index_from_string(result_column)

            processed_count = 0
            success_count = 0

            # Row 1 is assumed to be a header; data starts at row 2.
            for row_idx, row in enumerate(
                ws.iter_rows(min_row=2, values_only=True), start=2
            ):
                # Guard: row tuple may be shorter than the URL column.
                if len(row) < url_col_idx:
                    continue

                url_cell = row[url_col_idx - 1]

                # Only process cells holding an http(s) weibo.com link.
                if not (
                    isinstance(url_cell, str)
                    and url_cell.strip().startswith("http")
                    and "weibo.com" in url_cell
                ):
                    continue

                views = self.get_weibo_article_views(url_cell.strip())
                ws.cell(row=row_idx, column=result_col_idx, value=views)

                processed_count += 1
                # NOTE(review): abbreviated counts such as "1.2万" are real
                # successes but fail isdigit(), so success_count may
                # undercount — confirm the expected counter format.
                if views.isdigit():
                    success_count += 1

                logger.info("行 %s: URL=%s, Views=%s", row_idx, url_cell, views)

                # Throttle to avoid hammering Weibo / triggering rate limits.
                time.sleep(1)

            # Single save at the end; a crash mid-run leaves the file untouched.
            wb.save(file_path)

            logger.info(
                "\n处理完成! 共处理 %s 个URL, 成功 %s 个",
                processed_count,
                success_count,
            )

        except Exception as e:
            logger.error("处理Excel文件时出错: %s", e)
            raise


def main():
    """Entry point: update Weibo view counts in the configured workbook."""
    job = {
        "file_path": "C:/Users/qwx1425249/Desktop/data/开发者空间案例内外媒运营数据看板.xlsx",
        "sheet_name": "案例发布计划",
        "url_column": "H",
        "result_column": "I",
    }
    try:
        WeiboViewsUpdater().process_excel_file(**job)
    except KeyboardInterrupt:
        logger.info("用户中断程序")
    except Exception as e:
        logger.error(f"程序执行出错: {e}")


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
