# -*- coding: utf-8 -*-
"""
kb189_full_flow_strict.py
Python 3.9.13

功能：
- JS cookie 挑战处理 + 带重试请求
- 解析“会员登入”链接、线路与 token
- RSA 加密
- 登录流程（含 Agreement.html -> AcceptAgreement -> App/Index 预热）
- 下注接口：GetBetFullData / MemberBet

供 GUI 调用：
- login_flow(...)
- get_bet_full_data(...)
- build_bet_payload_triple(...)
- post_member_bet(...)
"""
import re
import time
import json
import binascii
from typing import Optional, Tuple, List, Dict
from urllib.parse import urljoin, urlparse, quote
import requests
from bs4 import BeautifulSoup
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5

# -------------------- Constants --------------------
# Desktop-Chrome user agent sent with every request.
UA = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
      "(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36")
# Accept-Language header mimicking a zh-CN browser profile.
BROWSER_ACCEPT_LANG = "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2"
# Matches the anti-bot cookie names set by the site's JS challenge (robot, robot1, robot7, ...).
RE_ROBOT = re.compile(r"^robot\d*$", re.I)

def make_headers(base: Optional[dict] = None, *, referer: Optional[str] = None, ajax: bool = False) -> dict:
    """Build a browser-like header dict.

    Starts from the UA / keep-alive / Accept-Language defaults, overlays
    *base* on top, then optionally sets a Referer and the XHR-style
    Accept / X-Requested-With pair.
    """
    headers = {
        "User-Agent": UA,
        "Connection": "keep-alive",
        "Accept-Language": BROWSER_ACCEPT_LANG,
    }
    if base:
        headers.update(base)
    if referer:
        headers["Referer"] = referer
    if ajax:
        headers.update({
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "X-Requested-With": "XMLHttpRequest",
        })
    return headers

# -------------------- JS challenge & retrying requests --------------------
def looks_like_html(resp) -> bool:
    """Heuristic: treat the response as HTML/text when its Content-Type
    mentions html or text/, or when the header is missing entirely."""
    content_type = (resp.headers.get("Content-Type") or "").lower()
    if not content_type:
        return True
    return "html" in content_type or "text/" in content_type

def parse_js_cookies(html: str):
    """Extract cookies set by inline JS such as:

        <script>document.cookie='robot7=....; path=/; domain=.kb189.cc'; ...</script>

    Returns a list of (name, value, domain, path) tuples. ``domain`` is None
    when the script did not specify one; ``path`` defaults to '/'.
    """
    found = []
    assignments = re.findall(
        r'document\.cookie\s*=\s*(?:"|\')([^"\']+)(?:"|\')', html, flags=re.S
    )
    for raw in assignments:
        pieces = [seg.strip() for seg in raw.split(';') if seg.strip()]
        if not pieces or '=' not in pieces[0]:
            continue
        name, _, value = pieces[0].partition('=')
        domain, path = None, '/'
        for attribute in pieces[1:]:
            lowered = attribute.lower()
            if lowered.startswith('domain='):
                domain = attribute.split('=', 1)[1].strip()
            elif lowered.startswith('path='):
                path = attribute.split('=', 1)[1].strip() or '/'
        found.append((name, value, domain, path))
    return found

def cookie_exists_with_same_value(session, name, value, eff_domain, path):
    """Return True if *session* already holds this exact cookie.

    A stored cookie matches when its name equals *name*, its domain equals
    *eff_domain* or ends with it, its path equals *path* (default '/'),
    and its value is identical.
    """
    wanted_path = path or '/'
    return any(
        cookie.value == value
        for cookie in session.cookies
        if cookie.name == name
        and (cookie.domain == eff_domain or cookie.domain.endswith(eff_domain))
        and cookie.path == wanted_path
    )

def request_with_js_and_retries(
    session: requests.Session, method: str, url: str, *,
    data=None, headers: Optional[dict] = None, timeout: int = 12,
    max_tries: int = 5, backoff_base: float = 1.0
) -> requests.Response:
    """Send a request, transparently solving inline-JS cookie challenges.

    Some pages respond with a small <script> that sets a ``robot*`` cookie via
    ``document.cookie`` and reloads.  When such a page is detected and yields
    a cookie the session does not already hold, the cookie is injected and
    the same URL is retried.  Non-2xx responses and transport errors are
    retried with linear backoff (``backoff_base * attempt`` seconds).

    Raises:
        requests.HTTPError: non-2xx status on the final attempt.
        requests.RequestException: transport failure on the final attempt.
        RuntimeError: every attempt was consumed by cookie-challenge retries
            without producing a final response.  (Previously this path fell
            off the end of the loop and returned None, breaking the declared
            return type and crashing callers later.)
    """
    last_exc: Optional[Exception] = None
    for attempt in range(1, max_tries + 1):
        try:
            req = requests.Request(method.upper(), url, data=data, headers=(headers or make_headers()))
            prep = session.prepare_request(req)
            resp = session.send(prep, timeout=timeout, allow_redirects=True)

            if not (200 <= resp.status_code < 300):
                raise requests.HTTPError(f"HTTP {resp.status_code}")

            resp.encoding = resp.apparent_encoding or "utf-8"
            txt = resp.text

            # Only when the page explicitly sets document.cookie: inject any
            # new robot* cookies, then retry the same URL.
            if looks_like_html(resp) and "document.cookie" in txt:
                host = urlparse(url).hostname or ""
                parts = host.split(".")
                # Naive registrable domain: last two labels, e.g. ".kb189.cc".
                default_domain = "." + ".".join(parts[-2:]) if len(parts) >= 2 else host
                js_cookies = parse_js_cookies(txt)

                new_robot = [
                    (name, value, domain, path)
                    for name, value, domain, path in js_cookies
                    if RE_ROBOT.match(name)
                    and not cookie_exists_with_same_value(
                        session, name, value, domain or default_domain, path or "/")
                ]
                if new_robot:
                    for name, value, domain, path in new_robot:
                        session.cookies.set(name, value, domain=(domain or default_domain), path=(path or "/"))
                    # Injected a new cookie -> retry the same URL.
                    continue

            return resp

        except Exception as exc:
            last_exc = exc
            if attempt < max_tries:
                time.sleep(backoff_base * attempt)
            else:
                raise

    # All attempts were spent re-solving the challenge; fail loudly instead of
    # implicitly returning None.
    raise RuntimeError(
        f"exhausted {max_tries} attempts resolving JS cookie challenge for {url}"
    ) from last_exc

# -------------------- Page parsing --------------------
def parse_member_href(html: str, base_url: str) -> Optional[str]:
    """Find the member-login anchor (id="btnMember") in *html* and resolve its
    href against *base_url*; return None when the link is absent."""
    anchor = BeautifulSoup(html, "html.parser").find("a", id="btnMember")
    if anchor is None or not anchor.has_attr("href"):
        return None
    return urljoin(base_url, anchor["href"])

def extract_lines_and_token(page1_html: str) -> Tuple[List[str], Optional[str]]:
    """Scrape the line-selection ("Speed") page for line base URLs and the
    login token.

    The token is a hex string taken from a ``token=...`` fragment.  Line URLs
    are read from an ``arrLine = [...]`` array literal first (two URL regex
    strategies), falling back to ``arrLine.push('...')`` calls.
    """
    token_match = re.search(r"token=([0-9A-F]+)", page1_html, re.IGNORECASE)
    token = token_match.group(1) if token_match else None

    urls: List[str] = []
    array_literal = re.search(r'arrLine\s*=\s*\[([^\]]+)\]', page1_html, re.IGNORECASE | re.DOTALL)
    if array_literal:
        blob = array_literal.group(1)
        urls = re.findall(r'https?://[^\s"\']+', blob, flags=re.IGNORECASE)
        if not urls:
            urls = re.findall(r'["\'](https?://[^"\']+)["\']', blob, flags=re.IGNORECASE)
    if not urls:
        urls = re.findall(r'arrLine\.push\(\s*[\'"]([^\'"]+)[\'"]\s*\)', page1_html, flags=re.IGNORECASE)
    return urls, token

# -------------------- RSA encryption --------------------
# Public-key parameters mirrored from the site's login JavaScript:
# exponent 0x10001 and a hex-encoded modulus.  Used to encrypt the
# account/password fields before POSTing to /Member/DoLogin.
EXPONENT_HEX = "010001"
MODULUS_HEX = (
    "BECA095A9D6509E1E78AA35D7198D95DF8B308CDA5E0D202E27452D56C312A5D"
    "B35CD62E159CD1A5CDD614316495F514947AD64AE7D0FD357958E7A66DBAA4DBA"
    "A005AF246C07E992FB4C988E5751328B6D2359CB99C38CDEC45AEBD36D9210F35"
    "17C577ECDA31A48F36D46F8A872C55623DFC2C905988D6BF84BCD0D0D7A33B"
)

def rsa_encrypt_real(text: str) -> str:
    """Encrypt *text* the way the site's JS does.

    Percent-encodes first (mirroring encodeURIComponent's safe set), then
    applies RSA PKCS#1 v1.5 with the site's public key, and returns the
    ciphertext as an uppercase hex string.
    """
    encoded = quote(text, safe="~()*!.'")
    key = RSA.construct((int(MODULUS_HEX, 16), int(EXPONENT_HEX, 16)))
    ciphertext = PKCS1_v1_5.new(key).encrypt(encoded.encode("utf-8"))
    return binascii.hexlify(ciphertext).decode().upper()

# -------------------- Login flow (incl. AcceptAgreement) --------------------
class LoginResult:
    """Artifacts of a successful login, handed back to the GUI caller."""
    def __init__(self, session: requests.Session, draw_base: str, app_index_url: str):
        # Authenticated session carrying the site's cookies.
        self.session = session
        # Base URL of the line that accepted the login; bet APIs target it.
        self.draw_base = draw_base
        # Warmed-up App/Index URL, later used as the Ajax Referer.
        self.app_index_url = app_index_url

def login_flow(*, base_origin: str, safecode: str, account: str, password: str,
               retries: int = 5, verify_ssl: bool = True, log_fn=lambda s: None) -> Optional[LoginResult]:
    """
    Full login sequence:
    root -> NavigateByTarget?SafeCode=... -> member-login (Speed) page -> try each line in turn:
      /Member/Login -> POST /Member/DoLogin -> [on success]
      -> GET /All/Agreement.html
      -> GET /Member/AcceptAgreement    <-- deliberately kept
      -> GET /App/Index?...             <-- used afterwards as the Ajax Referer
    Returns a LoginResult on success; None on failure.

    Parameters:
        base_origin: scheme+host of the entry site (trailing slash stripped).
        safecode:    value for the NavigateByTarget redirect.
        account / password: credentials; RSA-encrypted before POSTing.
        retries:     max attempts per HTTP request (JS-challenge aware).
        verify_ssl:  set False to skip certificate checks (warnings silenced).
        log_fn:      progress callback receiving one string per step.
    """
    session = requests.Session()
    session.headers.update({"User-Agent": UA})
    session.verify = verify_ssl
    if not verify_ssl:
        # Silence per-request InsecureRequestWarning noise when SSL checks are off.
        requests.packages.urllib3.disable_warnings(category=requests.packages.urllib3.exceptions.InsecureRequestWarning)

    base_origin = base_origin.rstrip("/")
    root_url = base_origin + "/"

    # 0) root page (also passes the JS cookie challenge, if any)
    log_fn(f"Visit root: {root_url}")
    request_with_js_and_retries(session, "GET", root_url, headers=make_headers(), max_tries=retries)

    # 1) safecode navigation
    nav_url = f"{base_origin}/Navigation/NavigateByTarget?SafeCode={safecode}"
    log_fn(f"Navigate: {nav_url}")
    nav_resp = request_with_js_and_retries(session, "GET", nav_url, headers=make_headers(referer=root_url), max_tries=retries)

    member_url = parse_member_href(nav_resp.text, nav_url)
    if not member_url:
        log_fn("会员登入链接未找到")  # member-login link not found
        return None

    # 2) line-selection (Speed) page
    log_fn(f"Open speed: {member_url}")
    speed_resp = request_with_js_and_retries(session, "GET", member_url, headers=make_headers(referer=nav_url), max_tries=retries)
    all_lines, token = extract_lines_and_token(speed_resp.text)
    if not all_lines or not token:
        log_fn("未能解析线路或 token")  # failed to parse lines or token
        return None

    # 3) try logging in on each line in turn
    last_err = ""
    for i, base in enumerate(all_lines):
        try:
            login_url = f"{base}/Member/Login?_={int(time.time()*1000)}&token={token}"
            log_fn(f"[Line {i+1}] Open login: {login_url}")
            request_with_js_and_retries(session, "GET", login_url, headers=make_headers(referer=member_url), max_tries=retries)

            # Credentials are RSA-encrypted client-side, mirroring the site's JS.
            payload = {
                "Account": rsa_encrypt_real(account),
                "Password": rsa_encrypt_real(password),
                "Token": token,
                "Captcha": "",
            }
            post_url = f"{base}/Member/DoLogin"
            log_fn(f"[Line {i+1}] POST login ...")
            resp = request_with_js_and_retries(session, "POST", post_url, data=payload,
                                               headers=make_headers(referer=login_url, ajax=True), max_tries=retries)

            ok = False
            try:
                j = resp.json()
                ok = (str(j.get("Status")) == "1")
            except Exception:
                # Non-JSON body: fall back to a raw substring check.
                ok = ('"Status":1' in resp.text)
            if not ok:
                last_err = "登录失败"
                continue

            # 4) Agreement -> AcceptAgreement -> App/Index warm-up (AcceptAgreement deliberately kept)
            agreement_url = f"{base}/All/Agreement.html"
            request_with_js_and_retries(session, "GET", agreement_url, headers=make_headers(referer=login_url), max_tries=retries)

            acceptAgreement_url = urljoin(base, "/Member/AcceptAgreement")
            request_with_js_and_retries(session, "GET", acceptAgreement_url, headers=make_headers(referer=agreement_url), max_tries=retries)

            app_index_url = f"{base}/App/Index?_={int(time.time()*1000)}"
            request_with_js_and_retries(session, "GET", app_index_url, headers=make_headers(referer=agreement_url), max_tries=retries)

            return LoginResult(session=session, draw_base=base, app_index_url=app_index_url)

        except Exception as e:
            last_err = str(e)
            log_fn(f"[Line {i+1}] failed: {e}")
            continue

    log_fn(f"所有线路登录失败：{last_err}")  # every line failed to log in
    return None

# -------------------- Betting API --------------------
def get_bet_full_data(session: requests.Session, draw_base: str, app_referer: str, *,
                      handicap_id: int = 1, group_id: int = 47, loop: int = 2, ajaxcount: int = 6,
                      max_tries: int = 5) -> Dict:
    """Fetch the current odds/bet board via GET /Home/GetBetFullData.

    *app_referer* is the warmed-up App/Index URL (required as Referer).
    Returns the decoded JSON payload; on a non-JSON body, returns a dict with
    Status=0, an "Invalid JSON" message, and the raw text under "Raw".
    """
    timestamp_ms = int(time.time() * 1000)
    query = (f"/Home/GetBetFullData?loop={loop}&handicapId={handicap_id}"
             f"&groupid={group_id}&ajaxcount={ajaxcount}&_={timestamp_ms}")
    response = request_with_js_and_retries(
        session, "GET", urljoin(draw_base, query),
        headers=make_headers(referer=app_referer, ajax=True), max_tries=max_tries,
    )
    try:
        return response.json()
    except Exception:
        return {"Status": 0, "Message": "Invalid JSON", "Raw": response.text}

def _two(n: int) -> str:
    """Zero-pad *n* to at least two digits (7 -> "07")."""
    return format(n, "02d")

def build_bet_payload_triple(items: List[int], money: int, odds_str: str, *, bet_type_id: int = 51, play_type_name: str = "三中二") -> Dict:
    """Build the form payload for a "pick three, hit two" style bet.

    *items* are the numbers to bet on (sorted ascending, zero-padded in the
    display name); *money* is the stake; *odds_str* is echoed into the
    sub-bet.  Returns {"BetData": <JSON array string>, "UseLastOdds":
    "false"}; HandicapId is appended by post_member_bet.
    """
    ordered = sorted(items)
    label = f"{play_type_name} " + ",".join(f"{num:02d}" for num in ordered)
    stake = int(money)
    bet_entry = {
        "BetTypeId": bet_type_id,
        "BetItems": ordered,
        "PlayTypeName": play_type_name,
        "BetName": label,
        "BetMoney": stake,
        "SubBet": [{"BetMoney": stake, "BetItems": ordered, "Odds": odds_str}],
    }
    # /Bet/MemberBet expects form fields: BetData (JSON array string),
    # UseLastOdds, and HandicapId (added later by post_member_bet).
    return {"BetData": json.dumps([bet_entry], ensure_ascii=False), "UseLastOdds": "false"}

def post_member_bet(session: requests.Session, draw_base: str, app_referer: str, payload_dict: Dict, *,
                    handicap_id: int = 1, max_tries: int = 5) -> Dict:
    """POST the bet form to /Bet/MemberBet.

    Works on a copy of *payload_dict* (the caller's dict is untouched), adds
    the HandicapId field, and returns the decoded JSON reply; falls back to a
    Status=0 dict carrying the raw body when the reply is not JSON.
    """
    form = dict(payload_dict)
    form["HandicapId"] = str(int(handicap_id))
    response = request_with_js_and_retries(
        session, "POST", urljoin(draw_base, "/Bet/MemberBet"), data=form,
        headers=make_headers(referer=app_referer, ajax=True), max_tries=max_tries,
    )
    try:
        return response.json()
    except Exception:
        return {"Status": 0, "Message": "Invalid JSON", "Raw": response.text}
