import time
from random import randint
import csv
import logging
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from fake_useragent import UserAgent
from selenium.webdriver.common.keys import Keys

# Configure logging: INFO level, emitted both to scraper.log (UTF-8) and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('scraper.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
# Module-level logger used by the scraper class and main().
logger = logging.getLogger(__name__)

class BeyondMenuScraper:
    """Scrape restaurant listings (name, address, phone) from beyondmenu.com.

    Drives a real Chrome browser via Selenium, with a randomized user agent
    and options that suppress the most common automation-detection signals.

    NOTE(review): the CSS selectors below are best-effort guesses at the
    site's markup — confirm them against the live pages.
    """

    def __init__(self):
        # Base site URL; all navigation is relative to this.
        self.base_url = 'https://www.beyondmenu.com'
        # Random desktop user agent per run, to look less like a bot.
        self.ua = UserAgent()
        logger.info("初始化爬虫...")
        self.setup_driver()

    def setup_driver(self):
        """Start Chrome with anti-automation-detection options.

        Sets ``self.driver`` and a 10-second ``self.wait``.

        Raises:
            Exception: re-raised after logging if the browser fails to start.
        """
        try:
            logger.info("设置Chrome选项...")
            options = Options()
            options.add_argument('--no-sandbox')
            options.add_argument('--disable-dev-shm-usage')
            options.add_argument(f'user-agent={self.ua.random}')
            # Hide the navigator.webdriver automation flag.
            options.add_argument('--disable-blink-features=AutomationControlled')
            options.add_argument('--start-maximized')

            # Remove the "controlled by automated software" infobar and the
            # automation extension Chrome normally loads under Selenium.
            options.add_experimental_option('excludeSwitches', ['enable-automation'])
            options.add_experimental_option('useAutomationExtension', False)

            logger.info("启动Chrome浏览器...")
            self.driver = webdriver.Chrome(options=options)
            self.wait = WebDriverWait(self.driver, 10)
            logger.info("Chrome浏览器启动成功")

        except Exception as e:
            logger.error(f"设置浏览器时出错: {str(e)}", exc_info=True)
            raise

    def get_all_locations(self):
        """Collect all location links from the /locations page.

        Returns:
            list[dict]: entries with ``name`` and ``url`` keys; empty on failure.
        """
        locations = []
        try:
            logger.info(f"访问位置页面: {self.base_url}/locations")
            self.driver.get(f"{self.base_url}/locations")
            # Randomized pause to mimic human browsing cadence.
            time.sleep(randint(2, 4))

            logger.info("等待位置元素加载...")
            location_elements = self.wait.until(
                EC.presence_of_all_elements_located(
                    (By.CSS_SELECTOR, 'a[href*="/restaurants/"]')
                )
            )
            logger.info(f"找到 {len(location_elements)} 个位置元素")

            # Dump the page source for offline selector debugging.
            with open('locations_page.html', 'w', encoding='utf-8') as f:
                f.write(self.driver.page_source)
            logger.info("已保存位置页面源码")

            for element in location_elements:
                try:
                    location_url = element.get_attribute('href')
                    location_name = element.text.strip()
                    # Skip anchors without a usable href — nothing to visit later.
                    if not location_url:
                        continue
                    locations.append({
                        'name': location_name,
                        'url': location_url
                    })
                    logger.info(f"添加位置: {location_name} -> {location_url}")
                except Exception as e:
                    logger.error(f"处理位置元素时出错: {str(e)}")
                    continue

        except TimeoutException:
            logger.error("等待位置元素超时")
        except Exception as e:
            logger.error(f"获取位置列表时出错: {str(e)}", exc_info=True)

        logger.info(f"共获取到 {len(locations)} 个位置")
        return locations

    def search_restaurants(self, location):
        """Search the site for restaurants in *location* and scrape the results.

        Args:
            location: free-text location, e.g. ``"New York, NY"``.

        Returns:
            list[dict]: restaurant info dicts; empty list on any failure.
        """
        try:
            logger.info(f"访问首页: {self.base_url}")
            self.driver.get(self.base_url)
            time.sleep(randint(2, 4))

            # Wait for any plausible search input to appear.
            logger.info("等待搜索框加载...")
            search_input = self.wait.until(
                EC.presence_of_element_located(
                    (By.CSS_SELECTOR, 'input[type="text"], input[placeholder*="Search"], input[aria-label*="Search"]')
                )
            )

            # Type the location query.
            logger.info(f"输入位置: {location}")
            search_input.clear()
            search_input.send_keys(location)
            time.sleep(randint(1, 2))

            # Submit: prefer an explicit button, fall back to pressing Enter.
            try:
                search_button = self.driver.find_element(By.CSS_SELECTOR, 'button[type="submit"]')
                search_button.click()
            except NoSuchElementException:
                logger.info("未找到搜索按钮，尝试按回车")
                search_input.send_keys(Keys.RETURN)

            time.sleep(randint(3, 5))

            # Dump the results page for offline selector debugging.
            with open(f'search_results_{location.replace(" ", "_")}.html', 'w', encoding='utf-8') as f:
                f.write(self.driver.page_source)
            logger.info("已保存搜索结果页面源码")

            return self.get_restaurants_from_page()

        except Exception as e:
            logger.error(f"搜索餐厅时出错: {str(e)}", exc_info=True)
            return []

    def get_restaurants_from_page(self):
        """Scroll the current results page to the bottom, then extract restaurants.

        Returns:
            list[dict]: one info dict per restaurant successfully parsed.
        """
        restaurants = []
        try:
            # Scroll repeatedly to trigger lazy loading; stop when the page
            # height stabilizes or after max_scrolls attempts.
            logger.info("开始滚动页面加载更多餐厅...")
            last_height = self.driver.execute_script("return document.body.scrollHeight")
            scroll_attempts = 0
            max_scrolls = 10

            while scroll_attempts < max_scrolls:
                self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(2)
                new_height = self.driver.execute_script("return document.body.scrollHeight")
                if new_height == last_height:
                    break
                last_height = new_height
                scroll_attempts += 1
                logger.info(f"页面滚动 {scroll_attempts} 次")

            # The site's markup is not stable — try selectors from most to
            # least specific and use the first that matches anything.
            selectors = [
                'div.restaurant-card',
                'div[class*="restaurant"]',
                'a[href*="/restaurant/"]',
                'div.store-info'
            ]

            restaurant_elements = []
            for selector in selectors:
                elements = self.driver.find_elements(By.CSS_SELECTOR, selector)
                if elements:
                    logger.info(f"使用选择器 '{selector}' 找到 {len(elements)} 个餐厅")
                    restaurant_elements = elements
                    break

            if not restaurant_elements:
                logger.warning("未找到任何餐厅元素")
                return []

            for index, element in enumerate(restaurant_elements, 1):
                try:
                    restaurant_info = self.extract_restaurant_info(element)
                    if restaurant_info:
                        restaurants.append(restaurant_info)
                        logger.info(f"成功获取第 {index} 个餐厅信息: {restaurant_info}")
                    # Brief randomized pause between elements.
                    time.sleep(randint(1, 2))
                except Exception as e:
                    logger.error(f"处理第 {index} 个餐厅时出错: {str(e)}")
                    continue

        except Exception as e:
            logger.error(f"获取餐厅列表时出错: {str(e)}", exc_info=True)

        return restaurants

    def extract_restaurant_info(self, element):
        """Extract name/address/phone from a single restaurant element.

        Args:
            element: a Selenium WebElement for one restaurant card.

        Returns:
            dict | None: info dict, or None when no name could be found
            (a name is required; address/phone get placeholder text).
        """
        try:
            name = address = phone = None

            # Name is mandatory — bail out if absent.
            try:
                name = element.find_element(By.CSS_SELECTOR, 'h3, .name, [class*="name"]').text.strip()
            except NoSuchElementException:
                logger.warning("未找到餐厅名称")
                return None

            # Address is optional.
            try:
                address = element.find_element(By.CSS_SELECTOR, '[class*="address"], .location').text.strip()
            except NoSuchElementException:
                logger.warning(f"未找到餐厅 '{name}' 的地址")

            # Phone is optional.
            try:
                phone = element.find_element(By.CSS_SELECTOR, '[class*="phone"], .tel').text.strip()
            except NoSuchElementException:
                logger.warning(f"未找到餐厅 '{name}' 的电话")

            return {
                'name': name,
                'address': address if address else '未找到地址',
                'phone': phone if phone else '未找到电话'
            }

        except Exception as e:
            logger.error(f"提取餐厅信息时出错: {str(e)}")
            return None

    def save_to_csv(self, data, filename):
        """Write the collected restaurant dicts to *filename* as UTF-8 CSV.

        Args:
            data: iterable of dicts with ``name``/``address``/``phone`` keys.
            filename: output CSV path.
        """
        try:
            # BUG FIX: the log messages previously printed the literal text
            # "(unknown)" instead of interpolating the filename.
            logger.info(f"开始保存数据到 {filename}")
            with open(filename, 'w', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=['name', 'address', 'phone'])
                writer.writeheader()
                writer.writerows(data)
            logger.info(f"数据已保存到 {filename}")
        except Exception as e:
            logger.error(f"保存CSV文件时出错: {str(e)}", exc_info=True)

    def close(self):
        """Explicitly shut down the browser. Safe to call more than once."""
        driver = getattr(self, 'driver', None)
        if driver is not None:
            logger.info("关闭浏览器...")
            try:
                driver.quit()
            except Exception:
                # Best-effort cleanup; the browser may already be gone.
                pass
            self.driver = None

    def __del__(self):
        """Finalizer: close the browser if the caller never did.

        Guarded because __del__ may run during interpreter shutdown, when
        logging/selenium internals are partially torn down — it must never raise.
        """
        try:
            self.close()
        except Exception:
            pass

def main():
    """Entry point: scrape a fixed list of cities and dump everything to one CSV."""
    try:
        logger.info("开始运行爬虫...")
        scraper = BeyondMenuScraper()

        # Cities to search; extend this list as needed.
        target_locations = [
            'New York, NY',
            'Los Angeles, CA',
            'Chicago, IL',
        ]

        collected = []
        total = len(target_locations)
        for idx, loc in enumerate(target_locations, start=1):
            logger.info(f"正在处理第 {idx}/{total} 个位置: {loc}")
            found = scraper.search_restaurants(loc)
            collected += found
            logger.info(f"{loc} 搜索完成，获取到 {len(found)} 家餐厅")
            # Randomized pause between cities to avoid hammering the site.
            time.sleep(randint(3, 6))

        # Persist everything gathered across all cities.
        scraper.save_to_csv(collected, 'beyond_menu_restaurants.csv')
        logger.info(f"爬取完成！共获取 {len(collected)} 家餐厅的数据。")

    except Exception as e:
        logger.error(f"程序运行出错: {str(e)}", exc_info=True)

# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()