import requests
from bs4 import BeautifulSoup
import time
import random
import mysql.connector
from mysql.connector import Error
import re
import os
from qcloud_cos import CosConfig
from qcloud_cos import CosS3Client
from urllib.parse import urlparse, urljoin
from dotenv import load_dotenv

# Load environment variables from a local .env file.
load_dotenv()

# Tencent Cloud COS configuration, read from the environment.
COS_SECRET_ID = os.getenv('COS_SECRET_ID')
COS_SECRET_KEY = os.getenv('COS_SECRET_KEY')
COS_REGION = os.getenv('COS_REGION', 'ap-beijing')  # defaults to the Beijing region
COS_BUCKET = os.getenv('COS_BUCKET')

# Initialize the COS client. Every article processed by this script must be
# uploaded to COS, so abort the whole run if the configuration is incomplete.
if COS_SECRET_ID and COS_SECRET_KEY and COS_BUCKET:
    config = CosConfig(Region=COS_REGION, SecretId=COS_SECRET_ID, SecretKey=COS_SECRET_KEY)
    cos_client = CosS3Client(config)
else:
    cos_client = None
    print("错误: COS配置不完整，无法执行历史数据处理")
    exit(1)

# Pool of browser User-Agent strings to pick from.
USER_AGENTS = [
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'
]

# Shared request headers.
# NOTE(review): the User-Agent is chosen once at import time, so all requests
# in a single run share the same UA — confirm this is the intended behavior.
headers = {
    'User-Agent': random.choice(USER_AGENTS),
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Referer': 'http://m.bazaar.com.cn/'
}

def process_html_content(html_content):
    """
    Normalize a crawled article page before uploading (mirrors bazaar_crawler.py).

    Steps:
      1. Strip the ``seajs.use(["global","article"]);`` bootstrap call.
      2. Rewrite the declared charset from gb2312 to utf-8 — the body is
         re-encoded as UTF-8 on upload, so the declaration must match.
      3. Inject a <style> tag before </head> that hides page chrome
         (header/footer/edit/share boxes) for clean embedding.

    Args:
        html_content: Raw HTML text of the article page.

    Returns:
        The processed HTML string; on any processing error the original
        content is returned unchanged (best-effort behavior).
    """
    try:
        # 1. Remove the seajs bootstrap call.
        html_content = re.sub(r'seajs\.use\(\["global","article"\]\);?', '', html_content)

        # 2. Replace the gb2312 charset declaration with utf-8. Cover both the
        #    HTML5 form <meta charset="gb2312"> and the legacy http-equiv form
        #    commonly emitted by older gb2312 pages.
        html_content = re.sub(r'<meta\s+charset=["\']?gb2312["\']?\s*/?>',
                              '<meta charset="utf-8">', html_content, flags=re.IGNORECASE)
        html_content = re.sub(
            r'<meta\s+http-equiv=["\']?Content-Type["\']?\s+content=["\']?text/html;\s*charset=gb2312["\']?\s*/?>',
            '<meta charset="utf-8">', html_content, flags=re.IGNORECASE)

        # 3. Inject the hiding styles just before </head>.
        style_tag = '<style>header,footer,.edit-box,.share-box{display: none;}</style>'
        html_content = re.sub(r'</head>', f'{style_tag}\n</head>', html_content, flags=re.IGNORECASE)

        return html_content

    except Exception as e:
        print(f"HTML内容处理失败: {e}")
        return html_content  # best-effort: return the original on failure

def download_and_upload_html(article_url, article_type, article_id):
    """
    Download an article's HTML page, normalize it, and upload it to Tencent COS
    (mirrors the logic in bazaar_crawler.py).

    Args:
        article_url: Absolute or site-relative URL of the article page.
        article_type: Category name, used as the COS folder.
        article_id: Unique article id, used as the COS file name.

    Returns:
        The COS object key (``html/<type>/<id>.html``) on success, or None if
        the COS client is unavailable, the URL is empty, or any download or
        upload step fails.
    """
    if not cos_client or not article_url:
        return None

    try:
        # Resolve site-relative links against the mobile site root.
        if not article_url.startswith('http'):
            base_url = 'http://m.bazaar.com.cn'
            article_url = urljoin(base_url, article_url)

        print(f"正在下载HTML: {article_url}")

        html_response = requests.get(article_url, headers=headers, timeout=30)

        if html_response.status_code != 200:
            print(f"HTML下载失败: {article_url}, 状态码: {html_response.status_code}")
            return None

        # Let requests sniff the real encoding — pages are often gb2312.
        # (Done only after the status check so failed responses skip the work.)
        html_response.encoding = html_response.apparent_encoding

        # Normalize the page: strip seajs, fix charset, hide page chrome.
        processed_html = process_html_content(html_response.text)

        # Object key layout on COS: html/<type>/<id>.html
        filename = f"html/{article_type}/{article_id}.html"

        cos_client.put_object(
            Bucket=COS_BUCKET,
            Body=processed_html.encode('utf-8'),
            Key=filename,
            ContentType='text/html; charset=utf-8'
        )

        # Fix: log the actual object key (previously printed a literal placeholder).
        print(f"HTML处理并上传成功，文件路径: {filename}")
        return filename

    except Exception as e:
        print(f"HTML下载或上传失败 {article_url}: {e}")
        return None

def get_historical_data():
    """
    Fetch all articles whose cos_html_url has not been populated yet.

    Returns:
        A list of ``(article_id, type, title, link)`` tuples ordered by date
        descending; an empty list on connection failure or query error.
    """
    # Initialize before the try so the finally block can safely test them even
    # when connect() itself raises (previously this caused a NameError that
    # masked the original database error).
    connection = None
    cursor = None
    try:
        # SECURITY NOTE(review): credentials are hard-coded; they should be
        # moved to environment variables like the COS configuration above.
        connection = mysql.connector.connect(
            host='101.35.207.195',
            database='bazzar',
            user='bazzar',
            password='F5xhfKjM3bbsDshE'
        )

        if connection.is_connected():
            cursor = connection.cursor()

            # Rows that still need their HTML mirrored to COS.
            query = """
                SELECT article_id, type, title, link 
                FROM article 
                WHERE cos_html_url IS NULL OR cos_html_url = ''
                ORDER BY date DESC
            """

            cursor.execute(query)
            results = cursor.fetchall()

            print(f"找到 {len(results)} 条需要处理的历史数据")
            return results

        # Not connected: return an explicit empty list instead of None so
        # callers can always iterate the result.
        return []

    except Error as e:
        print(f"数据库查询错误: {e}")
        return []
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None and connection.is_connected():
            connection.close()

def update_cos_html_url(article_id, cos_html_url):
    """
    Persist the COS object key for a processed article.

    Args:
        article_id: Primary key of the article row to update.
        cos_html_url: COS object key returned by download_and_upload_html.

    Returns:
        True if a matching row was updated, False otherwise (no matching row,
        connection failure, or database error).
    """
    # Initialize before the try so the finally block can safely test them even
    # when connect() itself raises (previously this caused a NameError that
    # masked the original database error).
    connection = None
    cursor = None
    try:
        # SECURITY NOTE(review): credentials are hard-coded; they should be
        # moved to environment variables like the COS configuration above.
        connection = mysql.connector.connect(
            host='101.35.207.195',
            database='bazzar',
            user='bazzar',
            password='F5xhfKjM3bbsDshE'
        )

        if connection.is_connected():
            cursor = connection.cursor()

            update_query = """
                UPDATE article 
                SET cos_html_url = %s 
                WHERE article_id = %s
            """

            # Parameterized query — never interpolate values into the SQL.
            cursor.execute(update_query, (cos_html_url, article_id))
            connection.commit()

            if cursor.rowcount > 0:
                print(f"成功更新文章 {article_id} 的COS URL")
                return True
            else:
                print(f"未找到文章 {article_id}")
                return False

        # Not connected: return an explicit False instead of None so the
        # function always returns a bool as documented.
        return False

    except Error as e:
        print(f"数据库更新错误: {e}")
        return False
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None and connection.is_connected():
            connection.close()

def process_historical_data():
    """
    Batch-process every article missing a COS HTML mirror: download and upload
    each page, then record the resulting object key in the database.
    """
    print("开始处理历史数据...")

    pending = get_historical_data()
    if not pending:
        print("没有需要处理的历史数据")
        return

    succeeded = 0
    failed = 0

    for article_id, article_type, title, link in pending:
        print(f"\n处理文章: {title} (ID: {article_id})")

        try:
            cos_key = download_and_upload_html(link, article_type, article_id)

            if not cos_key:
                failed += 1
                print(f"✗ HTML下载失败: {title}")
            elif update_cos_html_url(article_id, cos_key):
                succeeded += 1
                print(f"✓ 成功处理: {title}")
            else:
                failed += 1
                print(f"✗ 数据库更新失败: {title}")

            # Random pause between requests to avoid hammering the site.
            time.sleep(random.uniform(2, 5))

        except Exception as e:
            failed += 1
            print(f"✗ 处理失败: {title}, 错误: {e}")

    print(f"\n处理完成！")
    print(f"成功: {succeeded} 条")
    print(f"失败: {failed} 条")
    print(f"总计: {len(pending)} 条")

# Script entry point: run the full historical backfill in one batch.
if __name__ == '__main__':
    process_historical_data()