import requests
import openpyxl
from openpyxl import load_workbook
from bs4 import BeautifulSoup
import time
import re
import openpyxl
from openpyxl import load_workbook

import time


def get_csdn_article_views(url):
    """Fetch the page-view count ("pv") of a 51CTO blog article.

    The article ID is taken from the last path segment of *url* and used
    to query the 51CTO info-stat API.

    Args:
        url: Article URL whose last path segment is the numeric article ID.

    Returns:
        The view count as a string on success; otherwise a human-readable
        error message (the caller writes the return value straight into a
        spreadsheet cell, so errors are reported in-band rather than raised).
    """
    try:
        # Last path segment is the article ID. rstrip("/") guards against a
        # trailing slash, which would otherwise make the ID an empty string.
        article_id = url.rstrip("/").split("/")[-1]

        # Build the stats-API request.
        api_url = f"https://blog.51cto.com/blog/info-stat?id={article_id}"
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Referer": "https://www.google.com/",
        }

        # Send the API request (10 s timeout so a dead endpoint can't hang us).
        response = requests.get(api_url, headers=headers, timeout=10)
        print(f"API Request URL: {api_url}")
        print("API Response status code:", response.status_code)
        print("API Response content:", response.text)

        # Non-200 responses are reported, not raised.
        if response.status_code != 200:
            return f"API 请求失败，状态码: {response.status_code}"

        # Parse the JSON payload. Guard the "data" key with .get() so a
        # malformed payload reaches the "not found" message below instead of
        # raising KeyError/TypeError into the generic handler.
        data = response.json()
        payload = data.get("data") or {}
        if data.get("status") == 0 and "pv" in payload:
            return str(payload["pv"])
        return "未找到阅读数（API 返回数据异常）"

    except requests.exceptions.RequestException as e:
        return f"网络请求错误: {e}"
    except Exception as e:
        return f"发生错误: {e}"


def process_excel_file(
    file_path: str,
    sheet_name: str = None,  # defaults to the active sheet
    column_name: str = "R",  # column letter holding the article URLs
    column_name1: str = "S",  # column letter that receives the view counts
) -> None:
    """Read article URLs from one column of an Excel sheet and write the
    fetched view counts into another column, saving the workbook in place.

    Args:
        file_path: Path to the workbook; it is modified and re-saved.
        sheet_name: Sheet to process; the active sheet when None.
        column_name: Letter of the column containing the URLs.
        column_name1: Letter of the column that receives the view counts.
    """
    wb = load_workbook(file_path)
    ws = wb[sheet_name] if sheet_name else wb.active

    # Convert column letters (e.g. "R") to 1-based column indices.
    url_col = openpyxl.utils.column_index_from_string(column_name)
    out_col = openpyxl.utils.column_index_from_string(column_name1)

    try:
        # Walk every row of the URL..output column span; row[0] is the URL
        # column because iter_rows starts at min_col. Cells that are empty,
        # non-string, or not http(s) URLs are skipped.
        for row_index, row in enumerate(
            ws.iter_rows(min_col=url_col, max_col=out_col, values_only=True),
            start=1,
        ):
            cell_value = row[0]
            if isinstance(cell_value, str) and cell_value.strip().startswith("http"):
                views = get_csdn_article_views(cell_value)
                ws.cell(row=row_index, column=out_col, value=views)
                time.sleep(1)  # throttle so we don't hammer the API
                print(f"✅ 已更新行 {row_index} 的数据 URL: {cell_value}, Views: {views}")
    finally:
        # Save even if a fetch raises mid-run (or the user interrupts), so
        # results collected so far are not lost.
        wb.save(file_path)
    print(f"✨ 结果已保存到: {file_path}")


# Example usage: refresh the view-count column. Guarded so importing this
# module (e.g. for testing) does not kick off the network/Excel run.
if __name__ == "__main__":
    process_excel_file(
        file_path="C:/Users/qwx1425249/Desktop/data/开发者空间案例内外媒运营数据看板.xlsx",
        sheet_name="案例发布计划",  # sheet name (optional)
        column_name="R",  # URL column (optional)
        column_name1="S",
    )


# import random

# def get_blogs_article_views(url):
#     try:
#         headers = {
#             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36"
#         }
#         response = requests.get(url, headers=headers)
#         response.raise_for_status()  # 检查请求是否成功
#         soup = BeautifulSoup(response.text, 'html.parser')
#         blog_pattern = r'bbs\.huaweicloud\.com/blogs/'
#         forum_pattern = r'bbs\.huaweicloud\.com/forum/'
#         if re.search(blog_pattern, url):
#             # 查找阅读数的元素（博客的HTML结构可能会变，需要根据实际情况调整）
#             #<span class="view-count" data-count="4407">4.4k+</span>
#             read_count_element = soup.find("span", class_="view-count")
#             if read_count_element:
#                 read_count = read_count_element.get("data-count")
#                 return f"{read_count}"
#             else:
#                 return "未找到阅读数元素，请检查页面结构或URL是否正确。"
#         elif re.search(forum_pattern, url):
#             # <span data-v-014405fd="" class="read-num">浏览量：57</span>
#             # <span data-v-014405fd="" class="read-num">浏览量：2.5K</span>
#             read_count_element = soup.find("div", class_="date-time")
#             if read_count_element:
#                 read_count = read_count_element.text.strip()
#                 read_count = read_count.replace("浏览量", "").strip()
#                 if read_count.__contains__("K"):
#                     read_count = read_count.replace("K", "").strip()
#                     read_count = int(float(read_count) * 1000)+random.randint(1,100)
#                 return f"{read_count}"
#             else:
#                 return "未找到阅读数元素，请检查页面结构或URL是否正确。"
#     except Exception as e:
#         return f"发生错误: {e}"

# def process_excel_file(
# file_path: str,
# sheet_name: str = None,  # 默认当前活动页签
# column_name: str = "R",   # 默认列名 "D"
# column_name1: str = "S"   # 默认列名 "E"
# ) -> None:
#     """
#     从Excel文件的指定页签和列中读取URL，统计阅读数并保存结果。

#     Args:
#         file_path (str): 输入Excel文件路径。
#         output_file (str): 输出Excel文件路径。
#         sheet_name (str, optional): 页签名称。默认为当前活动页签。
#         column_name (str, optional): 列名（如 "A", "B", "E"）。默认为 "E"。
#     """
#     # 加载Excel文件
#     wb = load_workbook(file_path)
#     ws = wb[sheet_name] if sheet_name else wb.active

#     # 将列名（如 "E"）转换为列索引（如 5）
#     column_index = openpyxl.utils.column_index_from_string(column_name)
#     column_index1 = openpyxl.utils.column_index_from_string(column_name1)

#     # 获取指定列的所有行值（跳过空值和非URL）
#     for row_index,row in enumerate(ws.iter_rows(min_col=column_index, max_col=column_index1, values_only=True),start=1):
#         cell_value = row[0]
#         if isinstance(cell_value, str) and cell_value.strip().startswith("http"):
#             views = get_blogs_article_views(cell_value)
#             ws.cell(row=row_index, column=column_index1, value=views)
#             time.sleep(1)  # 避免请求过于频繁
#             print(f"已更新行 {row_index} 的数据 URL: {cell_value}, Views: {views}")

#     wb.save(file_path)
#     print(f"结果已保存到: {file_path}")

# # 示例使用
# process_excel_file(
# file_path="C:/Users/lwx1364338/Desktop/外媒阅读数刷新/开发者空间案例内外媒运营数据看板.xlsx",
# sheet_name="案例发布计划",  # 指定页签名称（可选）
# column_name="R",       # 指定列名（可选）
# column_name1="S"
# )
