import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
import mysql.connector
from mysql.connector import Error
import re
from datetime import datetime
import os
from qcloud_cos import CosConfig
from qcloud_cos import CosS3Client
from urllib.parse import urlparse, urljoin
from dotenv import load_dotenv

# Load configuration from a local .env file into the process environment.
load_dotenv()

# Tencent Cloud COS (object storage) credentials, read from the environment.
COS_SECRET_ID = os.getenv('COS_SECRET_ID')
COS_SECRET_KEY = os.getenv('COS_SECRET_KEY')
COS_REGION = os.getenv('COS_REGION', 'ap-beijing')  # default region: Beijing
COS_BUCKET = os.getenv('COS_BUCKET')

# Build the COS client only when every required credential is present;
# otherwise cos_client stays None and HTML backup uploads are skipped.
if COS_SECRET_ID and COS_SECRET_KEY and COS_BUCKET:
    config = CosConfig(Region=COS_REGION, SecretId=COS_SECRET_ID, SecretKey=COS_SECRET_KEY)
    cos_client = CosS3Client(config)
else:
    cos_client = None
    print("警告: COS配置不完整，将跳过HTML备份功能")

# Pool of User-Agent strings (desktop + mobile) used to vary the request fingerprint.
USER_AGENTS = [
    # original entries preserved
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Linux; Android 13; SM-G998B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Mobile Safari/537.36',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (iPad; CPU OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1',
    'Mozilla/5.0 (Linux; Android 14; Pixel 7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Mobile Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.3.1 Safari/605.1.15'
]

# Shared request headers for every HTTP call in this script.
headers = {
    # NOTE(review): random.choice runs once at module import, so a single
    # User-Agent is reused for the whole run — move the choice into each
    # request if true per-request rotation is intended.
    'User-Agent': random.choice(USER_AGENTS),
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Referer': 'http://m.bazaar.com.cn/'
}

def download_and_upload_html(article_url, article_type, article_id):
    """
    Download an article's HTML page and upload a processed copy to Tencent COS.

    Args:
        article_url: Absolute or site-relative URL of the article page.
        article_type: Channel name (e.g. 'fashion'); used in the object key.
        article_id: Unique article identifier; used as the object file name.

    Returns:
        The COS object key ('html/<type>/<id>.html') on success, or None when
        COS is not configured, the URL is empty, the download returns a
        non-200 status, or any step raises.
    """
    # Without a configured COS client (or a URL) there is nothing to back up.
    if not cos_client or not article_url:
        return None
    
    try:
        # Resolve site-relative links against the mobile site root.
        if not article_url.startswith('http'):
            base_url = 'http://m.bazaar.com.cn'
            article_url = urljoin(base_url, article_url)
        
        print(f"正在下载HTML: {article_url}")
        
        # Download the page; the site serves legacy encodings, so sniff the
        # real encoding from the body instead of trusting the HTTP header.
        html_response = requests.get(article_url, headers=headers, timeout=30)
        html_response.encoding = html_response.apparent_encoding
        
        if html_response.status_code != 200:
            print(f"HTML下载失败: {article_url}, 状态码: {html_response.status_code}")
            return None
        
        # Clean the page (strip scripts, fix charset, hide site chrome).
        processed_html = process_html_content(html_response.text)
        
        # Object key layout: html/<type>/<id>.html
        filename = f"html/{article_type}/{article_id}.html"
        
        # Upload the cleaned HTML to COS, re-encoded as UTF-8.
        cos_client.put_object(
            Bucket=COS_BUCKET,
            Body=processed_html.encode('utf-8'),
            Key=filename,
            ContentType='text/html; charset=utf-8'
        )
        
        # Bug fix: the success message previously printed a placeholder
        # instead of the actual object key.
        print(f"HTML处理并上传成功，文件路径: {filename}")
        return filename
        
    except Exception as e:
        # Best-effort backup: log and continue so scraping is not interrupted.
        print(f"HTML下载或上传失败 {article_url}: {e}")
        return None

def get_articles(url):
    """
    Fetch one channel listing page and extract its article entries.

    Args:
        url: Channel listing URL (e.g. 'http://m.bazaar.com.cn/fashion').

    Returns:
        A list of dicts with keys 'title', 'date', 'link', 'image';
        an empty list on any request or parse failure.
    """
    try:
        # Bug fix: timeout added so a stalled server cannot hang the run.
        response = requests.get(url, headers=headers, timeout=30)
        response.encoding = response.apparent_encoding
        soup = BeautifulSoup(response.text, 'html.parser')
        
        articles = []
        for item in soup.select('div.channel-list > div.list-item'):
            img_tag = item.select_one('img')
            title_tag = item.select_one('h3 a')
            # Bug fix: skip malformed list items instead of raising
            # AttributeError, which previously aborted the whole page.
            if img_tag is None or title_tag is None:
                continue
            
            # Lazy-loaded images keep the real URL in data-original.
            img_url = img_tag.get('data-original') or img_tag.get('src')
            
            articles.append({
                'title': title_tag.text.strip(),
                'date': datetime.now().strftime('%Y-%m-%d'),  # scrape date (page has no publish date)
                'link': title_tag['href'],
                'image': img_url
            })
        return articles
    except Exception as e:
        print(f"请求失败: {e}")
        return []

def save_to_csv(data, filename='bazaar_fashion.csv'):
    """
    Write scraped article records to a CSV file.

    Args:
        data: List of dicts with at least the columns listed below.
        filename: Output path; defaults to 'bazaar_fashion.csv'.
    """
    df = pd.DataFrame(data)
    # Fix an explicit column order (includes the 'type' channel field).
    df = df[['type', 'title', 'date', 'link', 'image']]
    # utf-8-sig writes a BOM so Excel opens the Chinese text correctly.
    df.to_csv(filename, index=False, encoding='utf-8-sig')
    # Bug fix: the message previously printed a placeholder instead of the path.
    print(f"已保存 {len(data)} 条数据到 {filename}")

def check_article_exists(cursor, article_id):
    """Return the (article_id, cos_html_url) row for *article_id*, or None if absent."""
    cursor.execute(
        "SELECT article_id, cos_html_url FROM article WHERE article_id = %s",
        (article_id,),
    )
    return cursor.fetchone()

def save_to_database(data):
    """
    Insert newly scraped articles into the `article` MySQL table.

    For each record, a stable article_id is derived from the numeric part of
    the link plus the channel type. Already-stored articles are skipped; new
    articles have their HTML backed up to COS first, then all new rows are
    inserted in one batch.

    Args:
        data: List of dicts with keys 'type', 'title', 'date', 'link', 'image'.
    """
    # Bug fix: pre-initialize so the finally block cannot hit a NameError
    # (masking the real error) when connect() itself raises.
    connection = None
    cursor = None
    try:
        # SECURITY(review): database credentials are hard-coded in source;
        # they should be read from environment variables like the COS config.
        connection = mysql.connector.connect(
            host='101.35.207.195',
            database='bazzar',
            user='bazzar',
            password='F5xhfKjM3bbsDshE'
        )
        
        if connection.is_connected():
            cursor = connection.cursor()
            
            # INSERT IGNORE keeps the batch going if a duplicate slips through.
            insert_query = """
                INSERT IGNORE INTO article 
                (type, title, date, link, image, article_id, cos_html_url)
                VALUES (%s, %s, %s, %s, %s, %s, %s)
            """
            
            records = []
            skipped_count = 0
            processed_count = 0
            
            for item in data:
                # Article links end in '/<number>.shtml'; that number plus the
                # channel type forms the unique id (e.g. 'fashion_12345').
                article_number = re.search(r'/(\d+)\.shtml$', item['link'])
                if not article_number:
                    continue
                article_id = f"{item['type']}_{article_number.group(1)}"
                
                # Skip articles that are already stored.
                existing_article = check_article_exists(cursor, article_id)
                if existing_article:
                    print(f"跳过已存在文章: {item['title']} (ID: {article_id})")
                    if existing_article[1]:  # already has a COS backup URL
                        print(f"  已有COS备份: {existing_article[1]}")
                    skipped_count += 1
                    continue
                
                print(f"\n处理新文章: {item['title']}")
                processed_count += 1
                
                # Back up the article HTML to COS; None when backup fails or
                # COS is not configured.
                cos_html_url = download_and_upload_html(
                    item['link'], 
                    item['type'], 
                    article_id
                )
                
                records.append((
                    item['type'],
                    item['title'],
                    item['date'],
                    item['link'],
                    item['image'],
                    article_id,
                    cos_html_url
                ))
                
                # Throttle between article downloads to avoid hammering the site.
                time.sleep(random.uniform(2, 5))
            
            if records:
                cursor.executemany(insert_query, records)
                connection.commit()
                print(f"\n成功写入 {cursor.rowcount} 条新数据到数据库")
            else:
                print("\n没有新数据需要写入")
            
            print(f"处理统计: 新增 {processed_count} 条，跳过 {skipped_count} 条")
            
    except Error as e:
        print(f"数据库错误: {e}")
    finally:
        if connection is not None and connection.is_connected():
            if cursor is not None:
                cursor.close()
            connection.close()

def process_html_content(html_content):
    """
    Clean a downloaded article page before it is archived.

    Transformations:
      1. Remove the seajs bootstrap call (site scripts are unwanted in archives).
      2. Declare the charset as utf-8 (the stored body is re-encoded as utf-8),
         covering both the HTML5 `<meta charset="gb2312">` form and the legacy
         `<meta http-equiv="Content-Type" content="...charset=gb2312">` form.
      3. Inject CSS before </head> that hides site chrome (header, footer,
         edit/share boxes) when the archived copy is rendered.

    Args:
        html_content: Raw page HTML as a str.

    Returns:
        The transformed HTML, or the original text if processing fails.
    """
    try:
        # 1. Drop the seajs module bootstrap.
        html_content = re.sub(r'seajs\.use\(\["global","article"\]\);?', '', html_content)
        
        # 2a. Normalize the HTML5-style charset declaration.
        html_content = re.sub(r'<meta\s+charset=["\']?gb2312["\']?\s*/?>', 
                             '<meta charset="utf-8">', html_content, flags=re.IGNORECASE)
        
        # 2b. Generalization: also rewrite charset=gb2312 inside http-equiv
        #     Content-Type metas, which gb2312-era pages commonly use.
        html_content = re.sub(r'(charset=["\']?)gb2312', r'\g<1>utf-8',
                              html_content, flags=re.IGNORECASE)
        
        # 3. Hide navigation/share chrome in the archived copy.
        style_tag = '<style>header,footer,.edit-box,.share-box{display: none;}</style>'
        html_content = re.sub(r'</head>', f'{style_tag}\n</head>', html_content, flags=re.IGNORECASE)
        
        return html_content
        
    except Exception as e:
        # Best effort: never let cleanup failures block the upload.
        print(f"HTML内容处理失败: {e}")
        return html_content  # return the original content on failure

if __name__ == '__main__':
    # Channels to scrape, in order.
    channel_pages = [
        {'type': 'fashion', 'url': 'http://m.bazaar.com.cn/fashion'},
        {'type': 'beauty', 'url': 'http://m.bazaar.com.cn/beauty'},
        {'type': 'lifestyle', 'url': 'http://m.bazaar.com.cn/lifestyle'},
    ]

    collected = []
    for page_no, page in enumerate(channel_pages, start=1):
        print(f"正在抓取第 {page_no} 页 ({page['type']})...")
        # Tag every article with its channel type before collecting it.
        for article in get_articles(page['url']):
            tagged = dict(article)
            tagged['type'] = page['type']
            collected.append(tagged)
        # Random delay after each listing request to avoid rate limiting.
        time.sleep(random.uniform(3, 10))

    save_to_database(collected)