import time
import random
from random import randint
import csv
import logging
import undetected_chromedriver as uc
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
import urllib.parse

# Configure logging: INFO level, timestamped format, mirrored to a UTF-8
# log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('scraper.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

class YelpScraper:
    """Scrape restaurant name / address / phone data from Yelp search results.

    Drives a real Chrome browser via undetected-chromedriver to reduce bot
    detection, scrolls like a human, and pauses for manual CAPTCHA solving
    (via ``input()``) when a verification page is detected. Results are
    periodically flushed to CSV so a crash loses little work.
    """

    def __init__(self):
        self.base_url = 'https://www.yelp.com'
        self.setup_driver()

    def setup_driver(self):
        """Create the Chrome driver with anti-detection flags and a random UA."""
        logger.info("设置Chrome浏览器...")

        options = uc.ChromeOptions()
        options.add_argument('--no-sandbox')
        options.add_argument('--disable-dev-shm-usage')
        options.add_argument('--disable-gpu')
        options.add_argument('--disable-extensions')
        options.add_argument('--disable-popup-blocking')
        options.add_argument('--disable-blink-features=AutomationControlled')
        options.add_argument('--disable-infobars')
        options.add_argument('--window-size=1920,1080')
        options.add_argument('--start-maximized')

        # Rotate the user agent so repeated runs look less uniform.
        user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.59',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0'
        ]
        options.add_argument(f'--user-agent={random.choice(user_agents)}')

        # Proxy support (fill in and uncomment if one is available).
        # options.add_argument('--proxy-server=http://your-proxy-ip:port')

        # Create the driver; fall back to a default-configured Chrome if the
        # custom options fail (works around occasional uc/Chrome mismatches).
        try:
            self.driver = uc.Chrome(options=options)
        except Exception as e:
            logger.error(f"使用选项启动Chrome失败: {str(e)}")
            logger.info("尝试使用默认配置启动...")
            self.driver = uc.Chrome()

        self.wait = WebDriverWait(self.driver, 15)

        # Plant baseline cookies before any scraping starts.
        self.setup_cookies()

    def setup_cookies(self):
        """Visit the homepage and set baseline cookies (consent + fake session)."""
        logger.info("设置cookies...")
        try:
            self.driver.get(self.base_url)
            time.sleep(3)

            # Pre-dismiss the cookie banner and fake a session id.
            self.driver.add_cookie({"name": "cookieconsent_status", "value": "dismiss"})
            self.driver.add_cookie({"name": "sessionId", "value": f"{random.randint(1000000, 9999999)}"})

            # Reload so the cookies take effect.
            self.driver.refresh()
            time.sleep(2)
        except Exception as e:
            logger.error(f"设置cookies时出错: {str(e)}")

    def human_like_scroll(self):
        """Scroll the page the way a person would (smooth, jittery, then back up)."""
        try:
            # Measure the document and viewport heights.
            page_height = self.driver.execute_script("return document.body.scrollHeight")
            screen_height = self.driver.execute_script("return window.innerHeight")

            # Scroll down in half-viewport steps with random pauses.
            for i in range(0, page_height, screen_height // 2):
                self.driver.execute_script(f"window.scrollTo({{top: {i}, behavior: 'smooth'}});")
                time.sleep(random.uniform(0.5, 1.5))  # random dwell time

            # A few random up/down nudges to look more organic.
            for _ in range(3):
                random_scroll = random.randint(-500, 500)
                self.driver.execute_script(f"window.scrollBy(0, {random_scroll});")
                time.sleep(random.uniform(0.3, 0.7))

            # Return to the top of the page.
            self.driver.execute_script("window.scrollTo(0, 0);")
            time.sleep(1)

        except Exception as e:
            logger.error(f"人类滚动模拟出错: {str(e)}")

    def get_restaurant_data(self, location):
        """Search Yelp for restaurants in *location* and return a list of dicts.

        Each dict has ``name``, ``address`` and ``phone`` keys. Pages through
        results 10 at a time, retrying up to ``max_retries`` times on
        redirects/timeouts, and pausing for manual CAPTCHA handling when a
        verification page is detected. Partial results are flushed to CSV
        after every page.
        """
        restaurants = []
        offset = 0
        max_retries = 3
        retry_count = 0

        encoded_location = urllib.parse.quote(location)

        # Visit the homepage first so any verification challenge can be
        # cleared manually before the real search begins.
        try:
            logger.info("首先访问Yelp主页并等待手动处理可能的验证...")
            self.driver.get(self.base_url)
            time.sleep(5)

            # Pause for manual intervention if the page looks like a CAPTCHA.
            if "captcha" in self.driver.page_source.lower() or "verify" in self.driver.page_source.lower() or "check" in self.driver.page_source.lower():
                logger.warning("检测到可能的验证页面，等待手动处理...")
                input("请在浏览器中处理验证（如有），然后按Enter继续...")
                time.sleep(3)
        except Exception as e:
            logger.error(f"访问主页时出错: {str(e)}")

        while True:
            try:
                url = f"{self.base_url}/search?find_desc=Restaurants&find_loc={encoded_location}&start={offset}"
                logger.info(f"\n正在访问URL: {url}")

                self.driver.get(url)
                # Generous wait so the page and its JS have time to settle.
                time.sleep(random.uniform(10, 15))

                # Check whether we were redirected to a verification page.
                current_url = self.driver.current_url
                logger.info(f"当前URL: {current_url}")

                if ("captcha" in self.driver.page_source.lower() or
                    "verify" in self.driver.page_source.lower() or
                    "check" in self.driver.page_source.lower() or
                    "confirm you're a person" in self.driver.page_source.lower()):
                    logger.warning("检测到验证页面，等待手动处理...")
                    input("请在浏览器中完成验证，然后按Enter继续...")
                    time.sleep(5)
                    # Reload the search page after manual verification.
                    self.driver.get(url)
                    time.sleep(random.uniform(5, 8))

                # Dump the page source for offline debugging.
                with open(f'yelp_search_{offset}.html', 'w', encoding='utf-8') as f:
                    f.write(self.driver.page_source)
                logger.info("已保存搜索结果页面")

                page_title = self.driver.title
                logger.info(f"页面标题: {page_title}")

                # If the URL/title no longer look like a search results page,
                # assume we were redirected and retry with fresh cookies.
                if "search" not in current_url.lower() or "restaurants" not in page_title.lower():
                    logger.warning(f"可能被重定向，页面标题: {page_title}, URL: {current_url}")
                    retry_count += 1
                    if retry_count <= max_retries:
                        self.driver.delete_all_cookies()
                        logger.info("已清除所有cookies，重新尝试...")
                        time.sleep(random.uniform(15, 30))
                        continue
                    else:
                        logger.error("多次尝试后仍被重定向，跳过此位置")
                        break

                # Scroll like a human to trigger lazy loading.
                self.human_like_scroll()

                # Preferred extraction path: harvest /biz/ links via JavaScript.
                restaurant_info = self.extract_restaurants_via_js()

                if restaurant_info:
                    restaurants.extend(restaurant_info)
                    logger.info(f"通过JavaScript提取方式找到 {len(restaurant_info)} 个餐厅")
                else:
                    # Fall back to Selenium selectors if the JS path found nothing.
                    logger.info("JavaScript提取失败，尝试使用传统选择器")

                    # Candidate selectors for restaurant links, from most to
                    # least generic.
                    selectors = [
                        'a[href*="/biz/"]',  # most general selector
                        'div[data-testid*="search-result"]',
                        'div[class*="mainAttributes"] a[href*="/biz/"]',
                        'h3[class*="css"] a[href*="/biz/"]',
                        'div[class*="container"] h3 a[href*="/biz/"]'
                    ]

                    restaurant_elements = []
                    for selector in selectors:
                        try:
                            elements = self.driver.find_elements(By.CSS_SELECTOR, selector)
                            if elements:
                                # get_attribute('href') can return None; guard so
                                # one hrefless element doesn't abort the selector.
                                filtered_elements = []
                                for el in elements:
                                    href = el.get_attribute('href')
                                    if href and '/biz/' in href:
                                        filtered_elements.append(el)
                                if filtered_elements:
                                    logger.info(f"使用选择器 '{selector}' 找到 {len(filtered_elements)} 个餐厅")
                                    restaurant_elements = filtered_elements
                                    break
                        except Exception as e:
                            logger.error(f"使用选择器 '{selector}' 时出错: {str(e)}")
                            continue

                    if not restaurant_elements:
                        logger.warning("未找到任何餐厅元素")
                        # Last resort: the loosest possible XPath.
                        try:
                            elements = self.driver.find_elements(
                                By.XPATH, "//a[contains(@href, '/biz/')]"
                            )
                            if elements:
                                restaurant_elements = elements
                                logger.info(f"使用XPath找到 {len(elements)} 个餐厅")
                        except Exception as e:
                            logger.error(f"使用XPath时出错: {str(e)}")

                    # Visit each found element's detail page and collect data.
                    if restaurant_elements:
                        processed = self.process_restaurant_elements(restaurant_elements)
                        if processed:
                            restaurants.extend(processed)
                    else:
                        retry_count += 1
                        if retry_count <= max_retries:
                            logger.info(f"重试获取餐厅列表 ({retry_count}/{max_retries})...")
                            time.sleep(random.uniform(15, 30))
                            continue
                        else:
                            logger.error("达到最大重试次数，跳过此页")
                            break

                # Flush accumulated results after every page.
                self.save_to_csv(restaurants, f'restaurants_{location.replace(", ", "_")}_partial.csv')

                # Determine whether a "Next" page exists.
                next_page = False
                try:
                    next_button = self.driver.find_element(By.CSS_SELECTOR, 'a[aria-label="Next"]')
                    if next_button:
                        next_page = True
                except NoSuchElementException:
                    try:
                        # Alternative ways a next-page link may be marked up.
                        next_buttons = self.driver.find_elements(By.XPATH, '//a[contains(@class, "next-link") or contains(@class, "next") or contains(@aria-label, "Next")]')
                        if next_buttons:
                            next_page = True
                    except Exception:
                        next_page = False

                if not next_page:
                    logger.info("没有检测到下一页，结束搜索")
                    break

                offset += 10
                # Long randomized pause before loading the next page.
                wait_time = random.uniform(20, 30)
                logger.info(f"等待 {wait_time:.1f} 秒后加载下一页...")
                time.sleep(wait_time)

                # This page succeeded; reset the retry counter.
                retry_count = 0

            except TimeoutException:
                logger.error("页面加载超时")
                retry_count += 1
                if retry_count <= max_retries:
                    logger.info(f"重试中... ({retry_count}/{max_retries})")
                    time.sleep(random.uniform(20, 30))
                    continue
                break
            except Exception as e:
                logger.error(f"获取餐厅列表时出错: {str(e)}", exc_info=True)
                retry_count += 1
                if retry_count <= max_retries:
                    logger.info(f"重试中... ({retry_count}/{max_retries})")
                    time.sleep(random.uniform(20, 30))
                    continue
                break

        return restaurants

    def extract_restaurants_via_js(self):
        """Extract restaurant data by running JavaScript in the search page.

        Harvests unique ``/biz/`` links, then opens each detail page (up to 10
        per page) in a new tab to scrape address and phone. Returns a list of
        ``{'name', 'address', 'phone'}`` dicts, or ``[]`` on failure.
        """
        try:
            # Collect every restaurant link and its visible name in one pass.
            js_script = """
            var restaurants = [];
            var links = document.querySelectorAll('a[href*="/biz/"]');

            for (var i = 0; i < links.length; i++) {
                var href = links[i].getAttribute('href');
                // 确保链接是餐厅链接，而不是评论或其他链接
                if (href && href.indexOf('/biz/') > -1 && href.indexOf('?') === -1) {
                    var name = links[i].textContent.trim();
                    if (name) {
                        restaurants.push({
                            name: name,
                            url: href
                        });
                    }
                }
            }

            return restaurants;
            """

            restaurants = self.driver.execute_script(js_script)
            logger.info(f"JS找到 {len(restaurants)} 个餐厅")

            # Deduplicate by URL, keeping first occurrence order.
            unique_restaurants = []
            seen_urls = set()

            for restaurant in restaurants:
                if restaurant['url'] not in seen_urls:
                    seen_urls.add(restaurant['url'])
                    unique_restaurants.append(restaurant)

            logger.info(f"去重后剩余 {len(unique_restaurants)} 个餐厅")

            # Fetch details for each restaurant (capped at 10 per page to
            # limit request volume).
            result = []
            for restaurant in unique_restaurants[:10]:
                try:
                    url = restaurant['url']
                    if not url.startswith('http'):
                        url = self.base_url + url

                    logger.info(f"处理餐厅: {restaurant['name']} -> {url}")

                    # Open the detail page in a fresh tab so the search
                    # results tab stays loaded.
                    self.driver.execute_script("window.open('');")
                    self.driver.switch_to.window(self.driver.window_handles[1])
                    self.driver.get(url)
                    time.sleep(random.uniform(5, 8))

                    # Save the detail page for offline debugging.
                    page_source = self.driver.page_source
                    with open(f'restaurant_detail_{url.split("/")[-1]}.html', 'w', encoding='utf-8') as f:
                        f.write(page_source)

                    # Extract address and phone via JavaScript heuristics.
                    detail_script = """
                    var result = {address: null, phone: null};

                    // 尝试多种方式查找地址
                    var addressElements = document.querySelectorAll('p[class*="address"], p[class*="css"], address, [data-testid="address"]');
                    for (var i = 0; i < addressElements.length; i++) {
                        var text = addressElements[i].textContent.trim();
                        if (text && text.includes(',')) {
                            result.address = text;
                            break;
                        }
                    }

                    // 尝试多种方式查找电话
                    var phoneElements = document.querySelectorAll('p[class*="phone"], p[class*="css"], [data-testid="phone"]');
                    for (var i = 0; i < phoneElements.length; i++) {
                        var text = phoneElements[i].textContent.trim();
                        if (text && (text.includes('-') || text.includes('('))) {
                            result.phone = text;
                            break;
                        }
                    }

                    return result;
                    """

                    details = self.driver.execute_script(detail_script)

                    result.append({
                        'name': restaurant['name'],
                        'address': details['address'] if details['address'] else '未找到地址',
                        'phone': details['phone'] if details['phone'] else '未找到电话'
                    })

                    # Close the detail tab and return to the search results.
                    self.driver.close()
                    self.driver.switch_to.window(self.driver.window_handles[0])
                    time.sleep(random.uniform(3, 5))

                except Exception as e:
                    logger.error(f"处理餐厅 {restaurant['name']} 时出错: {str(e)}")
                    # Make sure we end up back on the main tab.
                    if len(self.driver.window_handles) > 1:
                        self.driver.close()
                        self.driver.switch_to.window(self.driver.window_handles[0])
                    continue

            return result

        except Exception as e:
            logger.error(f"JavaScript提取出错: {str(e)}")
            return []

    def process_restaurant_elements(self, elements):
        """Visit each restaurant element's detail page and collect its data.

        Fallback path used when JS extraction finds nothing. Processes at most
        10 elements and returns ``{'name', 'address', 'phone'}`` dicts.
        """
        processed = []

        for element in elements[:10]:  # cap the per-page workload
            try:
                href = element.get_attribute('href')
                if not href or '/biz/' not in href:
                    continue

                name = element.text.strip()
                if not name:
                    name = "未知餐厅"

                logger.info(f"处理餐厅: {name} -> {href}")

                # Open the detail page in a new tab.
                self.driver.execute_script("window.open('');")
                self.driver.switch_to.window(self.driver.window_handles[1])
                self.driver.get(href)
                time.sleep(random.uniform(5, 8))

                # Scroll like a human on the detail page too.
                self.human_like_scroll()

                # Save the detail page for offline debugging.
                with open(f'restaurant_detail_{href.split("/")[-1]}.html', 'w', encoding='utf-8') as f:
                    f.write(self.driver.page_source)

                address = phone = None

                # Candidate selectors for the address block.
                address_selectors = [
                    'p[class*="css-chan6m"]',
                    'p[class*="address"]',
                    'address',
                    'p[class*="css-1ccz"]',
                    '[data-testid="address"]',
                    'p.css-1p9ibgf'  # additional commonly-seen class
                ]

                for selector in address_selectors:
                    try:
                        address_element = self.driver.find_element(By.CSS_SELECTOR, selector)
                        address = address_element.text.strip()
                        if address and ',' in address:
                            logger.info(f"找到地址: {address}")
                            break
                    except Exception:
                        continue

                # Candidate selectors for the phone number.
                # NOTE: the previous 'p:contains("-")' entry was removed — it
                # is a jQuery-only pseudo-class, not valid CSS, and always
                # raised an InvalidSelectorException.
                phone_selectors = [
                    'p[class*="css-1p9ibgf"]',
                    'p[class*="phone"]',
                    '[data-testid="phone"]',
                    'p.css-qyp8bo'  # additional commonly-seen class
                ]

                for selector in phone_selectors:
                    try:
                        phone_element = self.driver.find_element(By.CSS_SELECTOR, selector)
                        phone = phone_element.text.strip()
                        if phone and ('-' in phone or '(' in phone):
                            logger.info(f"找到电话: {phone}")
                            break
                    except Exception:
                        continue

                processed.append({
                    'name': name,
                    'address': address if address else '未找到地址',
                    'phone': phone if phone else '未找到电话'
                })

                # Close the detail tab and return to the search results.
                self.driver.close()
                self.driver.switch_to.window(self.driver.window_handles[0])
                time.sleep(random.uniform(3, 5))

            except Exception as e:
                logger.error(f"处理餐厅元素时出错: {str(e)}")
                # Make sure we end up back on the main tab.
                if len(self.driver.window_handles) > 1:
                    self.driver.close()
                    self.driver.switch_to.window(self.driver.window_handles[0])
                continue

        return processed

    def save_to_csv(self, data, filename):
        """Write scraped rows (name/address/phone dicts) to *filename* as UTF-8 CSV."""
        try:
            # FIX: these log messages previously printed the literal text
            # "(unknown)" instead of the target filename.
            logger.info(f"开始保存数据到 {filename}")
            with open(filename, 'w', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=['name', 'address', 'phone'])
                writer.writeheader()
                writer.writerows(data)
            logger.info(f"数据已保存到 {filename}")
        except Exception as e:
            logger.error(f"保存CSV文件时出错: {str(e)}")

    def __del__(self):
        """Best-effort browser cleanup when the scraper is garbage-collected."""
        if hasattr(self, 'driver'):
            logger.info("关闭浏览器...")
            # __del__ may run during interpreter shutdown; never let an
            # already-dead driver raise here.
            try:
                self.driver.quit()
            except Exception:
                pass

def main():
    """Run the scraper over every configured location and save results to CSV."""
    try:
        logger.info("开始运行爬虫...")
        scraper = YelpScraper()

        # Locations to crawl; uncomment entries to add more cities.
        locations = [
            'New York, NY',
            # 'Los Angeles, CA',
            # 'Chicago, IL',
        ]

        all_restaurants = []
        for i, location in enumerate(locations, 1):
            logger.info(f"正在处理第 {i}/{len(locations)} 个位置: {location}")
            restaurants = scraper.get_restaurant_data(location)
            all_restaurants.extend(restaurants)
            logger.info(f"{location} 爬取完成，获取到 {len(restaurants)} 家餐厅")

            # Save after each location so a later crash loses at most one city.
            scraper.save_to_csv(restaurants, f'restaurants_{location.replace(", ", "_")}.csv')

            # Long pause between locations to avoid rate limiting — but skip
            # the pointless sleep after the final location (previously the
            # script always slept 15-30 s before exiting).
            if i < len(locations):
                time.sleep(random.uniform(15, 30))

        # Persist the combined result set.
        scraper.save_to_csv(all_restaurants, 'all_restaurants.csv')
        logger.info(f"爬取完成！共获取 {len(all_restaurants)} 家餐厅的数据。")

    except Exception as e:
        logger.error(f"程序运行出错: {str(e)}", exc_info=True)

# Script entry point.
if __name__ == "__main__":
    main()