import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from bs4 import BeautifulSoup


# Fetch static page content over HTTP(S).
def fetch_page_static(url, headers, cookies, timeout=10):
    """Fetch a page with retries and return its body text.

    Args:
        url: Target URL to GET.
        headers: HTTP header dict sent with the request.
        cookies: Cookie dict sent with the request.
        timeout: Seconds to wait for connect/read before giving up.
            New parameter with a default, so existing callers are unaffected.

    Returns:
        The response body text on HTTP 200, otherwise ``None``
        (also ``None`` on any network-level error).
    """
    # Retry transient rate-limit / server-side failures; restrict retries
    # to idempotent methods so a flaky POST would never be repeated.
    retry_strategy = Retry(
        total=3,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["HEAD", "GET", "OPTIONS"],
    )
    adapter = HTTPAdapter(max_retries=retry_strategy)
    # Use the session as a context manager so its pooled connections are
    # closed deterministically (the original leaked the session).
    with requests.Session() as session:
        session.mount("https://", adapter)
        session.mount("http://", adapter)
        try:
            # Without an explicit timeout, a stalled connection would hang
            # the whole crawl indefinitely.
            response = session.get(
                url, headers=headers, cookies=cookies, timeout=timeout
            )
        except requests.RequestException as exc:
            # DNS failure, refused connection, timeout, etc. — report and
            # return None so the caller's existing failure path handles it.
            print(f"Request error for {url}: {exc}")
            return None

    if response.status_code == 200:
        return response.text
    print(f"Failed to retrieve the page {url}, status code: {response.status_code}")
    return None


# Extract matching hrefs (deduplicated, order-preserving) plus shop names
# and review times from a listing page.
def extract_unique_hrefs_with_base(html_content, base_url="https://www.dianping.com", prefix="/member/"):
    """Parse a review-list page and extract member links, shop names, times.

    Args:
        html_content: Raw HTML of the listing page.
        base_url: Prefix prepended to each relative href.
        prefix: Only hrefs starting with this path are kept.

    Returns:
        Tuple ``(full_urls, museum_names, times)`` — three lists extracted
        in document order. The caller pairs them positionally.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    hrefs = [a['href'] for a in soup.find_all('a', href=True) if a['href'].startswith(prefix)]
    # BUG FIX: the original used list(set(hrefs)), which randomizes order.
    # The caller (fetch_member_pages) pairs these URLs by index with
    # museum_names/times, so randomized order produced nondeterministically
    # wrong pairings. dict.fromkeys deduplicates while preserving
    # first-seen document order.
    # NOTE(review): if duplicate hrefs actually occur, dedup still shifts
    # indices relative to museum_names/times — confirm whether duplicates
    # are possible on the live page.
    unique_hrefs = list(dict.fromkeys(hrefs))
    full_urls = [f"{base_url}{href}" for href in unique_hrefs]

    # Shop (museum) names and review timestamps, in document order.
    museum_names = [span.get_text(strip=True)
                    for span in soup.find_all('span', class_='shop')]
    times = [span.get_text(strip=True)
             for span in soup.find_all('span', class_='time')]

    return full_urls, museum_names, times


# Second pass: request each member link and print the extracted details.
def fetch_member_pages(hrefs, headers, cookies, museum_names, times):
    """Fetch every member profile page and print museum/user/region/time.

    Args:
        hrefs: Full member-page URLs to fetch.
        headers: HTTP header dict forwarded to each request.
        cookies: Cookie dict forwarded to each request.
        museum_names: Shop names paired positionally with ``hrefs``.
        times: Review timestamps paired positionally with ``hrefs``.
    """
    for position, member_url in enumerate(hrefs):
        print(f"\nFetching content for: {member_url}")
        page_content = fetch_page_static(member_url, headers, cookies)
        if not page_content:
            # Guard clause: nothing to parse, move on to the next link.
            print(f"Failed to fetch content for: {member_url}")
            continue

        soup = BeautifulSoup(page_content, 'html.parser')

        # Locate the user's display name and region in the profile markup.
        name_tag = soup.find('h2', class_='name')
        # NOTE(review): 'user-groun' looks like a possible typo for
        # 'user-group' — confirm against the live page's HTML before
        # changing it.
        region_tag = soup.find('span', class_='user-groun')

        name_text = name_tag.get_text(strip=True) if name_tag else "No Name Found"
        region_text = region_tag.get_text(strip=True) if region_tag else "No Region Found"

        # Fall back to placeholders when the parallel lists are shorter
        # than the href list.
        museum_name = museum_names[position] if position < len(museum_names) else "No Museum Found"
        review_time = times[position] if position < len(times) else "No Time Found"
        print(f"Museum: {museum_name}, User: {name_text}, Region: {region_text}, Time: {review_time}")


# Script entry point
if __name__ == "__main__":
    # Browser-like headers so the request resembles a normal Chrome page
    # load (UA, client hints, fetch metadata).
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Pragma": "no-cache",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "none",
        "Sec-Fetch-User": "?1",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
        "sec-ch-ua": "\"Google Chrome\";v=\"131\", \"Chromium\";v=\"131\", \"Not_A Brand\";v=\"24\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }
    # NOTE(review): these are hardcoded session cookies — 'dper' in
    # particular appears to be a login token. They are account-specific and
    # will expire; move them to a config file or environment variables, and
    # do not commit live credentials to version control.
    cookies = {
        "GSI": "#rf_syztwlb@210720a#sl_zdjlwxbdsb@211011l#rf_tsmbyrk@220309b#rf_syjgwlb@220829a",
        "_lxsdk_cuid": "193b9ea6047c8-0f4140802d9a1c-26011851-1fa400-193b9ea6047c8",
        "_lxsdk": "193b9ea6047c8-0f4140802d9a1c-26011851-1fa400-193b9ea6047c8",
        "_hc.v": "5f3147d5-fee0-9e21-6580-0e2304500d27.1733990966",
        "WEBDFPID": "3y5yvz6987235w370055w60x1u8w861z806y0vz376997958615xyz93-2049350965982-1733990965489EKWSWQUfd79fef3d01d5e9aadc18ccd4d0c95071055",
        "qruuid": "076a9357-3474-4e6f-922b-a50157b29ee7",
        "dper": "02027025acc6e33c3a4e3def4c6d9465e6bbb166b2847249046403f94ed6672f0c89af2d6c2c2b8fcf17eff6ec7e89f0a19cece22106656c43d700000000fc240000c3a3de797edccd264b628a7eb2ee432973784decb65feaf5a7af5cbf937f35aa455308b31cb27443585dd299fc0f265c",
        "__CACHE@is_login": "true",
        "__CACHE@referer": "https://account.dianping.com/",
        "logan_custom_report": "",
        "fspop": "test",
        "logan_session_token": "1tx4ebccqaatlnur97ry",
        "ll": "7fd06e815b796be3df069dec7836c3df",
        "Hm_lvt_602b80cf8079ae6591966cc70a3940e7": "1733991053",
        "HMACCOUNT": "3C7D3FF744BDB4E3",
        "Hm_lpvt_602b80cf8079ae6591966cc70a3940e7": "1733991363",
        "_lxsdk_s": "193b9ea6047-a4-ee5-0f8%7C%7C473"
    }

    # The shop's review-list page; this is the first (static) fetch.
    url = "https://www.dianping.com/shop/k4RpR9GPCDefoitO/review_all"
    print("Fetching static page content...")
    html_content = fetch_page_static(url, headers, cookies)  # pass the URL
    if html_content:
        print("Extracting unique hrefs with base URL...")
        # Three positionally-paired lists: member URLs, shop names, times.
        unique_hrefs_with_base, museum_names, times = extract_unique_hrefs_with_base(html_content)
        print("\nFetching each member page...")
        fetch_member_pages(unique_hrefs_with_base, headers, cookies, museum_names, times)