import requests
import openpyxl
from openpyxl import load_workbook
from bs4 import BeautifulSoup
import time
import re
import urllib3
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import ssl
import warnings

# Completely disable all warnings. InsecureRequestWarning is expected here
# because every request below is made with verify=False (certificate checks
# are deliberately turned off for CSDN's TLS setup).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
warnings.filterwarnings('ignore')

class ForceTLSv1Adapter(HTTPAdapter):
    """HTTPAdapter that forces TLS 1.2+ with relaxed OpenSSL security level.

    Despite the (historical) class name, this adapter disables SSLv2/SSLv3/
    TLS 1.0/TLS 1.1 and allows only TLS 1.2 and newer. Certificate
    verification is intentionally disabled because the calling code uses
    ``verify=False``.
    """

    def init_poolmanager(self, *args, **kwargs):
        ctx = ssl.create_default_context()
        # SECLEVEL=1 re-enables weaker ciphers that some servers still need.
        ctx.set_ciphers('DEFAULT:@SECLEVEL=1')
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        # Force TLS 1.2 as the floor. Using minimum_version instead of the
        # deprecated ssl.OP_NO_SSLv3 / OP_NO_TLSv1 / OP_NO_TLSv1_1 flags
        # (deprecated since Python 3.7); the effect is identical.
        ctx.minimum_version = ssl.TLSVersion.TLSv1_2
        kwargs['ssl_context'] = ctx
        return super().init_poolmanager(*args, **kwargs)

def get_csdn_article_views_enhanced(url):
    """Fetch the view count of a CSDN article.

    Tries three extraction strategies in order: a ``article:views_count``
    meta tag, the ``data-report-view`` JSON blob, and the visible
    ``read-count`` span (handling "1.2k"-style values).

    Args:
        url: Full article URL (e.g. ``https://blog.csdn.net/...``).

    Returns:
        The view count as a numeric string, ``"未找到阅读量"`` when the page
        loaded but no count could be parsed, or ``"错误: <message>"`` on any
        network/parse failure (this function never raises).
    """
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "none"
        }

        # Aggressive retry policy for flaky CSDN endpoints.
        retry_strategy = Retry(
            total=3,
            backoff_factor=1,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["GET", "POST"],
            raise_on_status=False
        )

        # Use the session as a context manager so the connection pool is
        # always released (the original leaked it).
        with requests.Session() as session:
            # BUG FIX: the original mounted ForceTLSv1Adapter on "https://"
            # and then immediately replaced it with a plain HTTPAdapter, so
            # the custom TLS context never took effect. Passing the retry
            # strategy into the TLS adapter applies both at once.
            session.mount("http://", HTTPAdapter(max_retries=retry_strategy))
            session.mount("https://", ForceTLSv1Adapter(max_retries=retry_strategy))

            response = session.get(
                url,
                headers=headers,
                verify=False,
                timeout=(10, 30),  # 10s connect timeout, 30s read timeout
                allow_redirects=True
            )
            response.raise_for_status()
            html = response.text

        soup = BeautifulSoup(html, 'html.parser')

        # Strategy 1: view count from the meta tag.
        view_count_meta = soup.find('meta', attrs={'name': 'article:views_count'})
        if view_count_meta:
            return view_count_meta.get('content', '').strip()

        # Strategy 2: extract from the page's report-view data blob.
        data_element = soup.find('div', {'data-report-view': True})
        if data_element:
            report_data = data_element.get('data-report-view', '')
            match = re.search(r'"read":(\d+)', report_data)
            if match:
                return match.group(1)

        # Strategy 3: extract from the visible read-count element.
        read_count_element = soup.find('span', class_='read-count')
        if read_count_element:
            count_text = read_count_element.get_text().strip()
            # Values like "1.2k" are expanded to "1200".
            if 'k' in count_text.lower():
                count_value = float(re.search(r'[\d.]+', count_text).group()) * 1000
                return str(int(count_value))
            match = re.search(r'\d+', count_text)
            if match:
                return match.group()

        return "未找到阅读量"

    except Exception as e:
        # Deliberate catch-all: callers expect a string, never an exception.
        return f"错误: {str(e)}"

def process_excel_file_enhanced(
        file_path: str,
        sheet_name: str = None,
        url_column: str = "F",
        result_column: str = "G",
        delay: float = 3.0
) -> None:
    """Fill an Excel column with CSDN view counts for URLs in another column.

    Reads URLs from ``url_column`` (starting at row 2), fetches each article's
    view count, and writes the result to ``result_column``. Rows with empty
    cells, existing numeric results, or non-http values are skipped. The
    workbook is saved every 3 processed rows and again at the end; on error
    it attempts to save whatever progress was made.

    Args:
        file_path: Path to the ``.xlsx`` workbook (saved in place).
        sheet_name: Worksheet to process; the active sheet when ``None``.
        url_column: Column letter holding the article URLs.
        result_column: Column letter to write view counts into.
        delay: Seconds to sleep between requests (rate limiting).
    """
    # BUG FIX: initialize wb before the try so the recovery save in the
    # except handler cannot raise NameError when load_workbook itself fails.
    wb = None
    try:
        wb = load_workbook(file_path)
        ws = wb[sheet_name] if sheet_name else wb.active

        url_col_idx = openpyxl.utils.column_index_from_string(url_column)
        result_col_idx = openpyxl.utils.column_index_from_string(result_column)

        processed_count = 0
        skipped_count = 0
        error_count = 0

        print(f"开始处理工作表: {ws.title}")
        print(f"URL列: {url_column}, 结果列: {result_column}")
        print(f"总行数: {ws.max_row}, 延迟: {delay}秒/请求")

        # Data starts at row 2 (row 1 is the header).
        for row_idx in range(2, ws.max_row + 1):
            url_cell = ws.cell(row=row_idx, column=url_col_idx)
            result_cell = ws.cell(row=row_idx, column=result_col_idx)

            # Skip empty URL cells.
            if not url_cell.value:
                skipped_count += 1
                continue

            # Skip rows that already have a numeric result.
            if result_cell.value and isinstance(result_cell.value, (int, float)):
                skipped_count += 1
                continue

            url = str(url_cell.value).strip()
            if not url.startswith("http"):
                skipped_count += 1
                continue

            try:
                # Strip query parameters (CSDN share-link params can break
                # the request).
                clean_url = re.sub(r'\?.*$', '', url)

                views = get_csdn_article_views_enhanced(clean_url)
                result_cell.value = views
                processed_count += 1

                status = "✓" if views.isdigit() else "✗"
                print(f"行 {row_idx}: {status} {clean_url[:45]}... → {views}")

                # Checkpoint-save every 3 processed rows so a crash loses
                # little work.
                if processed_count % 3 == 0:
                    wb.save(file_path)
                    print(f"自动保存: 已处理 {processed_count} 条记录")

            except Exception as e:
                error_count += 1
                result_cell.value = f"处理错误: {str(e)[:30]}"
                print(f"行 {row_idx}: ✗ 处理失败 - {str(e)}")

            time.sleep(delay)  # throttle to avoid getting blocked

        # Final save.
        wb.save(file_path)
        print(f"\n处理完成! 共处理 {processed_count} 条链接")
        print(f"跳过 {skipped_count} 条, 错误 {error_count} 条")
        print(f"结果已保存至: {file_path}")

    except Exception as e:
        print(f"处理Excel时出错: {str(e)}")
        # Best-effort save of partial progress; only when the workbook was
        # actually loaded.
        if wb is not None:
            try:
                wb.save(file_path)
                print("已保存当前进度")
            except Exception:
                print("保存进度失败")
        else:
            print("保存进度失败")

# 使用示例
if __name__ == "__main__":
    # Smoke-test the scraper against a single known article first.
    sample = "https://blog.csdn.net/devcloud/article/details/144668305"
    print(f"测试URL: {sample}")
    outcome = get_csdn_article_views_enhanced(sample)
    print(f"测试结果: {outcome}")

    # Then run the full spreadsheet update.
    workbook_path = (
        "C:/Users/qwx1425249/Desktop/data/开发者空间案例内外媒运营数据看板.xlsx"
    )
    process_excel_file_enhanced(
        file_path=workbook_path,
        sheet_name="案例发布计划",
        url_column="F",
        result_column="G",
        delay=3.0,
    )