# steam_topselling_scraper.py

import requests
import logging
import time
import re
from typing import List, Optional
from bs4 import BeautifulSoup
from models import TopSellingGame

# Configure logging.
# NOTE(review): module-level basicConfig on the root logger at DEBUG is a global
# side effect at import time; importing applications inherit this verbosity.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')


class SteamTopSellingScraper:
    """
    Fetch the global Top Sellers chart through the Steam search endpoint:
      https://store.steampowered.com/search/results/
        ?query
        &start={start}
        &count={count}
        &dynamic_data=
        &sort_by=_ASC
        &category1=998
        &snr=1_7_7_globaltopsellers_7
        &filter=globaltopsellers
        &infinite=1
        &l=<language>&cc=<country_code>
    Returns a list of TopSellingGame, sorted ascending by rank (rank=1,2,3…).
    """

    # URL template; language (`l`) and country (`cc`) are appended per request.
    SEARCH_URL = (
        "https://store.steampowered.com/search/results/"
        "?query&start={start}&count={count}"
        "&dynamic_data=&sort_by=_ASC"
        "&category1=998"
        "&snr=1_7_7_globaltopsellers_7"
        "&filter=globaltopsellers"
        "&infinite=1"
    )

    # Browser-like headers so the endpoint serves the same JSON the store site gets.
    DEFAULT_HEADERS = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36"
        ),
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        "Accept-Encoding": "gzip, deflate",
        "Cache-Control": "max-age=0",
        "Pragma": "no-cache",
        "DNT": "1",
        "Referer": "https://store.steampowered.com/",
        "sec-ch-ua": '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\"",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1",
    }

    def __init__(self,
                 language: str = "schinese",
                 country_code: str = "CN",
                 max_retries: int = 3,
                 timeout: int = 30):
        """
        Args:
            language (str): store page language, e.g. 'schinese' (Simplified
                Chinese) or 'english'.
            country_code (str): country code, e.g. 'CN' or 'US'.
            max_retries (int): maximum request attempts on failure (including
                the first attempt).
            timeout (int): per-request timeout in seconds.
        """
        self.language = language
        self.country_code = country_code
        self.max_retries = max_retries
        self.timeout = timeout

        self.session = requests.Session()
        self.session.headers.update(self.DEFAULT_HEADERS)
        logging.info(
            f"SteamTopSellingScraper 初始化 → 语言={language}, 国家={country_code}, "
            f"重试={max_retries} 次, 超时={timeout}s"
        )
        # Optional proxy mapping, e.g. {"http": "...", "https": "..."}.
        self.proxies = None

    def _request_json(self, url: str) -> Optional[dict]:
        """
        Internal helper: GET with retries; returns parsed JSON or None.

        Retries (with a 2 s pause) only on timeouts and connection errors.
        HTTP errors, malformed JSON, and other request errors abort immediately.
        """
        attempt = 0
        while attempt < self.max_retries:
            attempt += 1
            try:
                logging.info(f"[搜索榜单] 第 {attempt}/{self.max_retries} 次尝试 → {url}")
                self.session.headers["Referer"] = "https://store.steampowered.com/"
                resp = self.session.get(url, timeout=self.timeout, proxies=self.proxies, allow_redirects=True)
                resp.raise_for_status()
                return resp.json()
            except requests.exceptions.Timeout:
                logging.warning(f"第 {attempt} 次请求超时 (timeout={self.timeout}s)。")
            except requests.exceptions.ConnectionError as e:
                logging.warning(f"第 {attempt} 次连接失败：{e}")
            except requests.exceptions.HTTPError as e:
                logging.error(f"第 {attempt} 次请求返回 HTTP 错误：{e.response.status_code}")
                return None
            except requests.exceptions.JSONDecodeError as e:
                logging.error(f"第 {attempt} 次返回内容无法解析为 JSON：{e}")
                return None
            except requests.exceptions.RequestException as e:
                logging.error(f"第 {attempt} 次请求遇到未知错误：{e}")
                return None

            if attempt < self.max_retries:
                logging.info("等待 2 秒后重试…")
                time.sleep(2)

        logging.error(f"已尝试 {self.max_retries} 次，仍无法获取数据。")
        return None

    @staticmethod
    def _parse_reviews(a_tag, appid: int) -> "tuple[Optional[int], Optional[int]]":
        """
        Extract (review_percent, review_total) from a result row's review
        tooltip, or (None, None) when the row carries no review score.
        """
        review_percent: Optional[int] = None
        review_total: Optional[int] = None

        # Only match the "search_reviewscore" class, as before.
        review_div = a_tag.find("div", class_="search_reviewscore")
        if not review_div:
            return None, None
        summary_span = review_div.find("span", class_="search_review_summary")
        if not summary_span:
            return None, None

        tooltip = summary_span.get("data-tooltip-html", "")
        logging.debug(f"AppID={appid} 的 tooltip 文本: {tooltip}")

        # BUGFIX: the old pattern r"([\d,]+).*?(\d+)%" assumed the review count
        # precedes the percentage (true for schinese tooltips, wrong for English
        # ones such as "95% of the 12,345 user reviews ...", where backtracking
        # produced total=9, percent=5).  Locate the percentage first, then look
        # for the review count in the remaining text — order-independent.
        m_pct = re.search(r"(\d{1,3})\s*%", tooltip)
        if m_pct:
            review_percent = int(m_pct.group(1))
            rest = tooltip[:m_pct.start()] + tooltip[m_pct.end():]
            m_tot = re.search(r"\d[\d,]*", rest)
            if m_tot:
                review_total = int(m_tot.group(0).replace(",", ""))
        else:
            logging.debug(f"AppID={appid} 无法匹配好评正则，tooltip: {tooltip}")
        return review_percent, review_total

    @staticmethod
    def _parse_price(a_tag) -> "tuple[Optional[float], Optional[int]]":
        """
        Extract (price, discount_percent) from a result row.

        Prefers the discount block (discounted items); falls back to the plain
        search_price div. Free games yield price 0.0; unparseable rows yield
        (None, None).
        """
        price: Optional[float] = None
        discount_percent: Optional[int] = None

        discount_block = a_tag.find("div", class_="discount_block")
        if discount_block:
            d_attr = discount_block.get("data-discount")
            if d_attr and d_attr.isdigit():
                discount_percent = int(d_attr)
            final_div = discount_block.find("div", class_="discount_final_price")
            if final_div:
                text = final_div.get_text(strip=True)
                if re.search(r"(免费|Free)", text, re.I):
                    price = 0.0
                else:
                    # Strip currency symbols/whitespace, keep digits and dots.
                    num = re.sub(r"[^\d.]", "", text)
                    if num:
                        price = float(num)
        else:
            price_div = a_tag.find("div", class_="search_price")
            if price_div:
                price_text = price_div.get_text(separator=" ", strip=True)
                if re.search(r"(免费|Free)", price_text, re.I):
                    price = 0.0
                else:
                    parts = price_text.split()
                    if parts:
                        num = re.sub(r"[^\d.]", "", parts[0])
                        if num:
                            price = float(num)
        return price, discount_percent

    def get_top_selling_via_search(self, start: int = 0, count: int = 100) -> List[TopSellingGame]:
        """
        Fetch the global top-sellers chart via the search endpoint and return a
        rank-ordered list of TopSellingGame (empty list on any failure).

        Args:
            start (int): zero-based offset of the first entry.  For the first
                100 entries use start=0, count=100; for the first 200, call
                twice with start=0,count=100 then start=100,count=100.
            count (int): number of entries to fetch in this call (max 100).
        """
        url = self.SEARCH_URL.format(start=start, count=count)
        url = f"{url}&l={self.language}&cc={self.country_code}"
        json_data = self._request_json(url)
        if not json_data:
            logging.error("未能拿到搜索接口的 JSON 数据。")
            return []

        # "results_html" holds the <a class="search_result_row">…</a> fragments.
        # BUGFIX: `or ""` guards against an explicit JSON null (get() with a
        # default only covers a *missing* key; null would crash .strip()).
        results_html = json_data.get("results_html") or ""
        if not results_html.strip():
            logging.warning("搜索接口返回的 results_html 为空，可能没有热销榜结果。")
            return []

        soup = BeautifulSoup(results_html, "html.parser")
        item_tags = soup.find_all("a", class_=re.compile(r"search_result_row"))
        if not item_tags:
            logging.warning("在 results_html 中未找到任何 <a class='search_result_row'>。")
            return []

        games: List[TopSellingGame] = []
        for idx, a_tag in enumerate(item_tags):
            try:
                rank = start + idx + 1
                appid_str = a_tag.get("data-ds-appid", "")
                # Bundles/packages carry comma-separated or missing appids — skip.
                if not appid_str.isdigit():
                    continue
                appid = int(appid_str)

                # 1) Name
                name_span = a_tag.find("span", class_="title")
                name = name_span.get_text(strip=True) if name_span else "未知游戏名"

                # 2) Review percentage & total review count
                review_percent, review_total = self._parse_reviews(a_tag, appid)

                # 3) Price & discount
                price, discount_percent = self._parse_price(a_tag)

                games.append(
                    TopSellingGame(
                        rank=rank,
                        appid=appid,
                        name=name,
                        price=price,
                        discount_percent=discount_percent,
                        concurrent_players=None,
                        review_percent=review_percent,
                        review_total=review_total
                    )
                )
            except Exception as e:
                logging.error(f"解析 rank={start+idx+1} 条目时出错：{e}")
                continue

        return games

    def get_top_selling_games(self) -> List[TopSellingGame]:
        """
        Fetch the top 100 by default.  For more entries, call
        get_top_selling_via_search directly with paging parameters.
        """
        return self.get_top_selling_via_search(start=0, count=100)
