﻿#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
印度尼西亚分批餐厅详情爬虫执行器 - 专门处理第9-12份文件
"""

import multiprocessing
import time
import os
import sys
from botasaurus import *
from botasaurus.browser_decorator import browser
from botasaurus_driver.user_agent import UserAgent
import csv
import random

def load_restaurant_urls(urls_file):
    """Read (city, url) pairs from the batch CSV file.

    Rows missing either a 'city' or 'url' column are skipped, and any
    '#fragment' suffix is stripped from the URL.  Returns an empty list
    when the file is missing or unreadable (errors are printed, not raised).
    """
    try:
        with open(urls_file, mode='r', encoding='utf-8', newline='') as handle:
            restaurant_urls = [
                {'city': entry['city'], 'url': entry['url'].split('#')[0]}
                for entry in csv.DictReader(handle)
                if 'city' in entry and 'url' in entry
            ]
    except FileNotFoundError:
        print(f"错误: 无法找到文件 '{urls_file}'。")
        return []
    except Exception as e:
        print(f"读取 '{urls_file}' 时出错: {e}")
        return []
    print(f"成功加载 {len(restaurant_urls)} 个餐厅URL。")
    return restaurant_urls

def load_checkpoint(checkpoint_file):
    """Return the index of the next URL to process, resuming from a prior run.

    Reads the integer stored in *checkpoint_file*.  Returns 0 when the file
    does not exist or cannot be read/parsed, so processing starts from the top.

    Fix: the original used a bare ``except:``, which would also swallow
    KeyboardInterrupt/SystemExit; narrowed to the actual read/parse errors
    (OSError from I/O, ValueError from int()).
    """
    if os.path.exists(checkpoint_file):
        try:
            with open(checkpoint_file, mode='r', encoding='utf-8') as file:
                return int(file.read().strip())
        except (OSError, ValueError):
            print(f"警告: 无法读取断点文件 '{checkpoint_file}'，从头开始。")
    return 0

def save_checkpoint(checkpoint_file, index):
    """Persist *index* (the next URL index to process) to *checkpoint_file*.

    Write failures are reported on stdout instead of raised, so a failed
    checkpoint write never aborts the scraping loop.
    """
    try:
        with open(checkpoint_file, mode='w', encoding='utf-8') as out:
            out.write(str(index))
    except Exception as err:
        print(f"错误: 无法保存断点到 '{checkpoint_file}': {err}")

def save_restaurant_data(filepath, data):
    """Append one restaurant row to the CSV at *filepath*.

    The header row is written first when the target file is new.  Keys
    missing from *data* are emitted as empty cells by DictWriter.

    Fix: the original skipped the header whenever the file merely existed,
    so a zero-byte file (e.g. left behind by an interrupted run) ended up
    headerless; the header is now also written when the file is empty.
    """
    fieldnames = ['城市', '品牌名称', '评分', '平均价格', '一级品类', '二级品类',
                  '评论数目', '电话', '邮箱', '品牌链接', '地址']
    try:
        # Header needed when the file is absent OR present but empty.
        needs_header = (not os.path.exists(filepath)
                        or os.path.getsize(filepath) == 0)

        with open(filepath, mode='a', encoding='utf-8', newline='') as file:
            writer = csv.DictWriter(file, fieldnames=fieldnames)
            if needs_header:
                writer.writeheader()

            # Missing keys become empty cells (DictWriter restval default).
            writer.writerow(data)
            file.flush()  # make the row durable immediately (long-running scrape)
    except Exception as e:
        print(f"错误: 无法写入数据到 '{filepath}': {e}")

def _safe_select_text(driver, selector, failure_message):
    """Return the text of the first element matching *selector*.

    Prints *failure_message* and returns '' when the element cannot be
    found or read (any driver exception is treated as "not found").
    """
    try:
        return driver.select(selector).text
    except Exception:
        print(failure_message)
        return ''


@browser(
    user_agent=UserAgent.REAL,
    chrome_executable_path=r"C:\Program Files\Google\Chrome\Application\chrome.exe",
    cache=True,
    block_images=True,
    lang="zh-CN",
    reuse_driver=True,
    close_on_crash=True,
    create_error_logs=True,
    headless=False,
)
def scrape_batch_restaurant_details(driver, data):
    """Scrape restaurant detail pages for one batch file of URLs.

    Parameters
    ----------
    driver : browser driver injected by the @browser decorator.
    data : dict with required key "batch_file" naming the input CSV of URLs.

    Side effects: appends one CSV row per restaurant to
    "<batch>_restaurant_details.csv" and advances "<batch>_checkpoint.txt"
    after each successful save so an interrupted run can resume.
    """
    batch_file = data.get("batch_file")
    if not batch_file:
        raise ValueError("必须提供 batch_file 参数")

    # Derive per-batch output/checkpoint file names from the input file name.
    batch_name = os.path.splitext(os.path.basename(batch_file))[0]
    output_file = f"{batch_name}_restaurant_details.csv"
    checkpoint_file = f"{batch_name}_checkpoint.txt"
    # Throttle: at most 2 page loads per minute (>= 30s between requests).
    requests_per_minute = 2
    min_delay = 60 / requests_per_minute

    print(f" 开始处理批次文件: {batch_file}")
    print(f" 输入文件: {batch_file}")
    print(f" 输出文件: {output_file}")
    print(f" 断点文件: {checkpoint_file}")

    # Load the URL list for this batch.
    restaurant_urls = load_restaurant_urls(batch_file)
    if not restaurant_urls:
        print("没有可处理的餐厅URL，程序退出。")
        return

    # Resume from the last checkpoint, if any.
    start_index = load_checkpoint(checkpoint_file)
    if start_index >= len(restaurant_urls):
        print("所有URL已处理完成。")
        return

    print(f"从索引 {start_index} 开始处理，共 {len(restaurant_urls)} 个URL。")

    for index in range(start_index, len(restaurant_urls)):
        task = restaurant_urls[index]
        city = task['city']
        url = task['url']
        print(f"\n--- 开始处理: {city} - {url} ---")

        try:
            driver.get(url)
            # Randomized delay keeps the request rate under the throttle.
            time.sleep(random.uniform(min_delay, min_delay + 3))

            # NOTE: the original code reused the name `data` here, shadowing
            # the function parameter; `record` avoids that.
            record = {'城市': city.replace('餐厅', ''), '品牌链接': url}

            # 品牌名称 / 评分: simple text extractions, empty string on failure.
            record['品牌名称'] = _safe_select_text(
                driver, "h1.biGQs._P.hzzSG", "无法提取品牌名称。")
            record['评分'] = _safe_select_text(
                driver, 'div[data-automation="bubbleRatingValue"]', "无法提取评分。")

            # 平均价格: parse "low-high" price ranges into their midpoint;
            # anything unparsable is stored as the raw text.
            # (Removed the original's no-op `replace('', '')` — dead code.)
            try:
                price_range_text = driver.select(
                    "span.HUMGB.cPbcf span.bTeln:last-of-type a").text
                if '-' in price_range_text and any(ch.isdigit() for ch in price_range_text):
                    prices = []
                    # Keep only digits per segment so currency symbols and
                    # thousands separators cannot break int().
                    for part in price_range_text.split('-'):
                        digits = ''.join(filter(str.isdigit, part))
                        if digits:
                            prices.append(int(digits))
                    # Average only when at least one bound parsed cleanly.
                    record['平均价格'] = (
                        sum(prices) / len(prices) if prices else price_range_text)
                else:
                    record['平均价格'] = price_range_text
            except Exception:
                record['平均价格'] = ''
                print("无法提取价格范围。")

            # 一级品类: last link in the category breadcrumb.
            record['一级品类'] = _safe_select_text(
                driver, "span.HUMGB.cPbcf a:last-of-type", "无法提取一级品类。")

            # 二级品类: second link in the breadcrumb, if present.
            try:
                record['二级品类'] = driver.select_all("span.HUMGB.cPbcf a")[1].text
            except Exception:
                record['二级品类'] = ''
                print("无法提取二级品类。")

            # 评论数目: keep only the digits (drops separators like ",").
            try:
                review_count_text = driver.select(
                    'div.CsAqy a[href="#REVIEWS"] div[data-automation="bubbleReviewCount"]').text
                record['评论数目'] = ''.join(filter(str.isdigit, review_count_text))
            except Exception:
                record['评论数目'] = ''
                print("无法提取评论数目。")

            # 电话: text of the tel: link, spaces stripped.
            try:
                record['电话'] = driver.select("a[href^='tel:']").text.replace(' ', '')
            except Exception:
                record['电话'] = ''
                print("无法提取电话。")

            # 地址: text shown on the address button.
            record['地址'] = _safe_select_text(
                driver, "button.Tbrbj span", "无法提取地址。")

            # 邮箱: taken from the mailto: href, not the link text.
            try:
                mail = driver.select("a[href^='mailto:']").get_attribute('href')
                record['邮箱'] = mail.replace('mailto:', '')
            except Exception:
                record['邮箱'] = ''
                print("无法提取邮箱。")

            save_restaurant_data(output_file, record)
            print(f" 成功保存数据: {record['品牌名称']} ({url})")
            # Advance the checkpoint only after a successful save, so a
            # crash retries the current URL on restart.
            save_checkpoint(checkpoint_file, index + 1)
        except Exception as e:
            # Log and continue so one bad page does not abort the batch.
            print(f" 处理 {url} 时出错: {e}")

    print(f" 批次文件 {batch_file} 处理完成！")

def run_batch_restaurant(batch_file):
    """Worker-process entry point: scrape one batch file end to end.

    Returns a one-line status string ("成功" / "失败" / "文件不存在")
    consumed by the summary report in main().
    """
    print(f" 启动进程处理批次文件: {batch_file}")
    started = time.time()

    try:
        # Bail out early when the batch file is missing.
        if not os.path.exists(batch_file):
            print(f" 错误: 找不到批次文件 '{batch_file}'。")
            return f"{batch_file}: 文件不存在"

        scrape_batch_restaurant_details(data={"batch_file": batch_file})
        elapsed = time.time() - started
        print(f" 批次文件 {batch_file} 处理完成，耗时: {elapsed:.2f} 秒")
        return f"{batch_file}: 成功 ({elapsed:.2f}s)"
    except Exception as e:
        elapsed = time.time() - started
        print(f" 批次文件 {batch_file} 处理失败: {e}，耗时: {elapsed:.2f} 秒")
        return f"{batch_file}: 失败 - {str(e)}"

def main():
    """Entry point: scrape batch files 9-12 concurrently (4 worker processes)."""
    print("=" * 60)
    print("  印度尼西亚分批餐厅详情爬虫执行器启动")
    print("=" * 60)

    start_time = time.time()

    # Batch files 9 through 12 of the Indonesia URL split.
    batch_files = [
        f"印度尼西亚_restaurant_urls_第{n}份.csv" for n in (9, 10, 11, 12)
    ]

    # Keep only the batch files that actually exist on disk.
    available_files = []
    for candidate in batch_files:
        if os.path.exists(candidate):
            print(f" 找到批次文件: {candidate}")
            available_files.append(candidate)
        else:
            print(f"  警告: 找不到批次文件: {candidate}，将跳过")

    if not available_files:
        print(" 错误: 没有可用的批次文件，请检查文件是否存在")
        return

    print(f"\n 准备并发处理 {len(available_files)} 个批次文件")
    print(f"  最大并发数: 4 (浏览器实例)")
    print("-" * 60)

    # One browser instance per worker process, up to 4 in parallel.
    with multiprocessing.Pool(processes=4) as pool:
        results = pool.map(run_batch_restaurant, available_files)

    # Per-batch result summary.
    print("\n" + "=" * 60)
    print(" 并发处理结果汇总")
    print("=" * 60)
    for outcome in results:
        print(f"   {outcome}")

    total_time = time.time() - start_time
    print(f"\n  总耗时: {total_time:.2f} 秒")
    print(" 所有批次文件并发处理完毕！")

if __name__ == "__main__":
    # Support frozen executables on Windows: freeze_support() must run in
    # the main module before spawning multiprocessing workers (no-op when
    # the program is not frozen).
    if sys.platform.startswith('win'):
        multiprocessing.freeze_support()
    
    main()
