import re
import requests
import pymysql
from bs4 import BeautifulSoup
import logging
import sys
import time

# Logging: basicConfig installs a FileHandler on the root logger so every
# message is persisted to image_link_process.log.
logging.basicConfig(
    filename='image_link_process.log',
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)

# Mirror all log output to stdout so progress is visible while the script runs.
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
console_handler.setFormatter(formatter)
logging.getLogger().addHandler(console_handler)
logger = logging.getLogger()  # root logger, shared by every function below

# Database connection settings, expanded into pymysql.connect(**DB_CONFIG).
# DictCursor makes fetchall() return dict rows keyed by column name.
# NOTE(review): live credentials are hard-coded in source — consider moving
# them to environment variables or a config file before sharing this script.
DB_CONFIG = {
    'host': '211.154.25.60',
    'user': 'zazhi4',
    'password': 'DBdz3nr8NeAxY37S',
    'database': 'zazhi4',
    'port': 3307,
    'charset': 'utf8mb4',
    'cursorclass': pymysql.cursors.DictCursor
}

# Base URL prepended to relative image paths found in goods_kind HTML.
BASE_URL = 'https://zimg.zazhiejia.com'

def connect_to_database():
    """Open a MySQL connection using the module-level DB_CONFIG.

    Returns:
        A live pymysql connection on success, or ``None`` when the
        attempt fails (the error is logged before returning).
    """
    logger.info("尝试连接到数据库...")
    try:
        # A 10-second connect timeout keeps the script from hanging
        # indefinitely on an unreachable host.
        connection = pymysql.connect(connect_timeout=10, **DB_CONFIG)
    except pymysql.Error as err:
        logger.error(f"数据库连接失败: {err}")
        return None
    logger.info("数据库连接成功!")
    return connection

def test_image_url(url):
    """Return True if *url* answers a HEAD request with HTTP 200.

    Fix: ``requests.head`` does not follow redirects by default, so an
    image served behind a 301/302 used to be reported as inaccessible
    even though browsers load it fine. ``allow_redirects=True`` makes
    the check follow the redirect chain to the final response.

    Returns False on any network error (logged as a warning).
    """
    try:
        response = requests.head(url, timeout=5, allow_redirects=True)
        return response.status_code == 200
    except requests.RequestException as e:
        logger.warning(f"测试URL失败: {url}, 错误: {str(e)}")
        return False

def join_url(base, path):
    """Join *base* and *path*, normalizing the slash at the boundary.

    Trailing slashes on *base* are dropped and a single leading slash is
    guaranteed on *path*, so the result never has a doubled or missing
    separator at the join point.
    """
    if not path.startswith('/'):
        path = '/' + path
    return base.rstrip('/') + path

def process_goods_kind(goods_id, content):
    """Rewrite relative ``<img src>`` links in *content* to absolute URLs.

    Parses the HTML, and for every ``<img>`` whose ``src`` is a
    site-relative path, replaces it with ``join_url(BASE_URL, src)``.
    Each rewritten URL is probed with :func:`test_image_url` and broken
    ones are counted.

    Fix: the previous version treated protocol-relative URLs
    (``//host/path``) and inline ``data:`` URIs as relative paths and
    corrupted them by prefixing BASE_URL; both are now left untouched.

    Args:
        goods_id: record id, used only for log messages.
        content: the goods_kind HTML (may be empty/None).

    Returns:
        Tuple ``(new_content, total_links, inaccessible_links)``.
        ``new_content`` is *content* unchanged when nothing was rewritten.
    """
    if not content:
        logger.warning(f"商品ID {goods_id} 的内容为空")
        return content, 0, 0

    logger.info(f"处理商品ID {goods_id} 的内容...")
    soup = BeautifulSoup(content, 'html.parser')
    imgs = soup.find_all('img')

    total_links = len(imgs)
    logger.info(f"发现 {total_links} 个图片链接")

    modified = False
    inaccessible_links = 0

    for i, img in enumerate(imgs):
        if not img.has_attr('src'):
            continue
        src = img['src']
        # Skip URLs that are already absolute, protocol-relative
        # (//host/...), or inline data: URIs — only site-relative
        # paths need rewriting.
        if (src.startswith('http://') or src.startswith('https://')
                or src.startswith('//') or src.startswith('data:')):
            continue

        new_src = join_url(BASE_URL, src)
        logger.info(f"[{i+1}/{total_links}] 替换链接: {src} -> {new_src}")
        img['src'] = new_src
        modified = True

        # Optional reachability probe; slows processing but surfaces
        # links that 404 after rewriting.
        if not test_image_url(new_src):
            inaccessible_links += 1
            logger.warning(f"商品ID {goods_id} 的图片 {new_src} 不可访问")

    # Only re-serialize when something changed, so untouched records keep
    # their original byte-for-byte HTML.
    result = str(soup) if modified else content
    return result, total_links, inaccessible_links

def sequential_process():
    """Single-threaded pass over fa_shop_goods fixing relative image links.

    Selects every record whose goods_kind contains an ``<img ... src=``
    fragment, rewrites relative links via :func:`process_goods_kind`,
    and commits each updated row immediately so progress survives a
    crash. Summary statistics are logged at the end.

    Fix: the old detection regex ``["\'][^"\'h]`` excluded http/https by
    banning a first character of ``h`` — which also wrongly skipped any
    relative path starting with 'h' (e.g. ``hero.png``). The negative
    lookahead ``(?!https?://)`` excludes only real absolute URLs. The
    pattern is now compiled once instead of being re-parsed per record.
    """
    logger.info("开始单线程顺序处理...")

    conn = connect_to_database()
    if not conn:
        logger.error("无法连接数据库，退出程序")
        return

    # Matches an <img> tag whose quoted src does NOT start with http:// or
    # https:// — i.e. a candidate relative link.
    relative_img_re = re.compile(
        r'<img\s+[^>]*src\s*=\s*["\'](?!https?://)[^"\']+["\']'
    )

    try:
        with conn.cursor() as cursor:
            # Fetch every record that plausibly contains an image tag;
            # the regex above does the precise filtering afterwards.
            logger.info("执行SQL查询以获取商品记录...")
            query = """
                SELECT id, goods_kind FROM fa_shop_goods 
                WHERE goods_kind LIKE '%<img%' AND goods_kind LIKE '%src=%'
            """
            logger.info(f"执行查询: {query}")
            cursor.execute(query)
            records = cursor.fetchall()

            if not records:
                logger.warning("没有找到包含图片的商品记录，请检查SQL查询条件")
                return

            logger.info(f"找到 {len(records)} 条可能包含图片的商品记录")

            # Log a preview of the first record to aid debugging.
            sample_record = records[0]
            sample_id = sample_record['id']
            sample_content = sample_record['goods_kind']
            logger.info(f"示例记录ID: {sample_id}")
            content_preview = sample_content[:100] if sample_content else 'None'
            logger.info(f"示例内容片段: {content_preview}...")

            has_relative_links = bool(relative_img_re.search(sample_content or ''))
            logger.info(f"示例记录是否包含相对链接: {has_relative_links}")

            total_processed = 0
            total_updated = 0
            problem_records = []

            for i, record in enumerate(records):
                goods_id = record['id']
                goods_kind = record['goods_kind']

                logger.info(f"[{i+1}/{len(records)}] 正在处理商品ID: {goods_id}")

                if not goods_kind:
                    logger.warning(f"商品ID {goods_id} 的内容为空，跳过")
                    continue

                if not relative_img_re.search(goods_kind):
                    logger.info(f"商品ID {goods_id} 不包含相对链接，跳过")
                    continue

                try:
                    new_content, total, inaccessible = process_goods_kind(goods_id, goods_kind)
                    total_processed += 1

                    if new_content != goods_kind:
                        update_query = "UPDATE fa_shop_goods SET goods_kind = %s WHERE id = %s"
                        cursor.execute(update_query, (new_content, goods_id))
                        conn.commit()  # commit per row so partial progress is kept
                        total_updated += 1
                        logger.info(f"已更新商品ID: {goods_id}")
                    else:
                        logger.info(f"商品ID {goods_id} 无需更新")

                    # Track records whose rewritten links failed the probe.
                    if inaccessible > 0:
                        problem_records.append({
                            'id': goods_id,
                            'inaccessible_count': inaccessible
                        })

                    # Brief pause to reduce database load.
                    time.sleep(0.5)

                except Exception as e:
                    logger.error(f"处理商品ID {goods_id} 时出错: {str(e)}")
                    conn.rollback()  # undo the failed update, keep processing

            logger.info("处理完成，统计信息如下:")
            logger.info(f"已处理商品数: {total_processed}")
            logger.info(f"已更新商品数: {total_updated}")
            logger.info(f"存在问题的商品数: {len(problem_records)}")

            if problem_records:
                logger.info("问题商品详情:")
                for record in problem_records:
                    logger.info(f"  ID: {record['id']}, 不可访问链接数: {record['inaccessible_count']}")

    except pymysql.Error as err:
        logger.error(f"执行SQL失败: {err}")
        conn.rollback()
    except Exception as e:
        logger.error(f"发生未预期的错误: {str(e)}")
        conn.rollback()
    finally:
        conn.close()
        logger.info("数据库连接已关闭")

def main():
    """Script entry point: run the sequential link-fixing pass."""
    logger.info("开始处理商品图片链接...")
    sequential_process()
    logger.info("处理完成！详细日志请查看 image_link_process.log 文件")


if __name__ == "__main__":
    main()

