# NOTE(review): this file was recovered from a whitespace-mangled source in which
# all HTML/angle-bracket content had been stripped. The HTML templates, one error
# string, and one regex were reconstructed — confirm against the original deployment.

print("Starting Server...")

import io
import re
from enum import Enum

import httpx
import requests
from bs4 import BeautifulSoup
from fastapi import FastAPI, HTTPException, Query
from fastapi.requests import Request
from fastapi.responses import HTMLResponse, StreamingResponse
from fastapi.templating import Jinja2Templates  # kept from original import list (currently unused)

# Headers to mimic a browser.
# NOTE(review): defined but not referenced by any endpoint below — kept for parity.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                  "AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/115.0.0.0 Safari/537.36",
    "Accept": "application/json",
    "Referer": "http://hato.malupdaterosx.moe/",
    "Origin": "http://hato.malupdaterosx.moe",
}

app = FastAPI()

ANIME_DB_URL = (
    "https://raw.githubusercontent.com/Fribb/anime-lists/refs/heads/master/"
    "anime-offline-database-reduced.json"
)

# Lazily-populated module-level cache of Fribb's offline anime database.
anime_data_cache = None


def load_anime_data():
    """Download (once) and return Fribb's anime-offline database as parsed JSON.

    Raises:
        HTTPException: 500 if the database cannot be fetched or parsed.
    """
    global anime_data_cache
    if anime_data_cache is None:
        try:
            resp = requests.get(ANIME_DB_URL, timeout=15)
            resp.raise_for_status()
            anime_data_cache = resp.json()
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Failed to load anime data: {str(e)}")
    return anime_data_cache


@app.get("/map/mal/{mal_id}")
def mal_to_kitsu(mal_id: int):
    """
    Convert MAL ID to Kitsu ID using Fribb's anime data.
    """
    anime_list = load_anime_data()
    for anime in anime_list:
        if anime.get("mal_id") == mal_id:
            kitsu_id = anime.get("kitsu_id")
            if kitsu_id:
                return {"kitsu_id": kitsu_id}
            # Entry exists but has no Kitsu mapping.
            raise HTTPException(status_code=404, detail=f"No Kitsu ID for MAL ID {mal_id}")
    raise HTTPException(status_code=404, detail=f"MAL ID {mal_id} not found")


@app.get("/map/kitsu/{kitsu_id}")
def kitsu_to_mal(kitsu_id: int):
    """
    Convert Kitsu ID to MAL ID using Fribb's anime data.
    """
    anime_list = load_anime_data()
    for anime in anime_list:
        if anime.get("kitsu_id") == kitsu_id:
            mal_id = anime.get("mal_id")
            if mal_id:
                return {"mal_id": mal_id}
            # Entry exists but has no MAL mapping.
            raise HTTPException(status_code=404, detail=f"No MAL ID for Kitsu ID {kitsu_id}")
    raise HTTPException(status_code=404, detail=f"Kitsu ID {kitsu_id} not found")


ANILIST_API_URL = "https://graphql.anilist.co"

# GraphQL to convert MAL ID to AniList ID and get images
ANILIST_QUERY = """
query ($malId: Int) {
  Media(idMal: $malId, type: ANIME) {
    id
    bannerImage
    coverImage {
      extraLarge
      large
      medium
    }
  }
}
"""


@app.get("/anime/image")
async def get_anime_image(
    mal_id: int = Query(..., description="MyAnimeList anime ID"),
    cover: bool = Query(False, description="Return cover image instead of banner"),
):
    """Proxy the AniList banner (or cover) image for a MAL anime ID as raw bytes."""
    payload = {
        "query": ANILIST_QUERY,
        "variables": {"malId": mal_id},
    }
    async with httpx.AsyncClient() as client:
        res = await client.post(ANILIST_API_URL, json=payload)
        data = res.json()
        media = data.get("data", {}).get("Media")
        if not media:
            return {"error": "Anime not found"}

        # Get the image URL
        if cover:
            image_url = (
                media["coverImage"].get("extraLarge")
                or media["coverImage"].get("large")
                or media["coverImage"].get("medium")
            )
        else:
            image_url = media.get("bannerImage")

        if not image_url:
            return {"error": "Image not available"}

        # Fetch the actual image bytes — must happen inside the client context
        # so the connection pool is still open.
        img_response = await client.get(image_url)
        if img_response.status_code != 200:
            return {"error": "Failed to fetch image"}

        # Guess content type (fallback to generic image/jpeg)
        content_type = img_response.headers.get("Content-Type", "image/jpeg")
        return StreamingResponse(io.BytesIO(img_response.content), media_type=content_type)


def get_anime_title(jikan_id: str) -> str:
    """
    Fetch the anime title from Jikan API using the provided Jikan ID.
    Prefers the English title if available; otherwise falls back to the default title.
    """
    url = f"https://api.jikan.moe/v4/anime/{jikan_id}"
    try:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()  # Will raise an exception for 4xx/5xx status
        data = resp.json().get("data", {})
        title = data.get("title_english") or data.get("title")
        if not title:
            raise HTTPException(status_code=500, detail="Failed to retrieve title from Jikan")
        return title
    except requests.exceptions.HTTPError:
        raise HTTPException(status_code=404, detail=f"Jikan ID {jikan_id} not found")
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=502, detail=f"Error contacting Jikan API: {e}")


def slugify_title_for_9anime(title: str) -> str:
    """Converts a title to a URL-friendly slug, merging apostrophe-s into the preceding word."""
    # 1) Handle English possessives: "journey's" -> "journeys"
    s = re.sub(r"(?i)([a-z0-9])'s\b", r"\1s", title)
    # 2) Remove any remaining apostrophes (and typographic ’) entirely
    s = re.sub(r"[’']", "", s)
    # 3) Lowercase and replace any run of non-alphanumerics with a single hyphen
    s = s.lower()
    s = re.sub(r"[^a-z0-9]+", "-", s)
    # 4) Collapse multiple hyphens into one and strip leading/trailing hyphens
    s = re.sub(r"-{2,}", "-", s).strip("-")
    return s


def resolve_redirect_url(url: str) -> str:
    """Follows redirects to find the final direct download URL."""
    try:
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.head(url, headers=headers, allow_redirects=True, timeout=15)
        resp.raise_for_status()
        return resp.url
    except requests.exceptions.Timeout:
        raise HTTPException(status_code=408, detail="Request timed out while resolving download redirect.")
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=502, detail=f"Failed to resolve download redirect: {e}")


# --- NEW LOGIC: Nonce Extraction and AJAX Call ---

def extract_nonce_from_html(html: str) -> str:
    """Extracts the nonce value from the HTML or JavaScript.

    Raises:
        HTTPException: 500 if no nonce-like token is present in the page.
    """
    match = re.search(r"nonce\s*[:=]\s*['\"]([a-fA-F0-9]{10,})['\"]", html)
    if match:
        # FIX: the return value was split from its expression in the original,
        # which made the function return None.
        return match.group(1)
    raise HTTPException(status_code=500, detail="Could not find nonce in the 9anime page HTML.")


def get_nonce_from_9anime(jikan_id: str, episode: int, dub: bool) -> str:
    """Fetches the 9anime episode page and extracts the security nonce."""
    title = get_anime_title(jikan_id)
    base_slug = slugify_title_for_9anime(title)
    slug_suffix = f"-dub-episode-{episode}" if dub else f"-episode-{episode}"
    nineanime_slug = f"{base_slug}{slug_suffix}"
    nineanime_url = f"https://9anime.org.lv/{nineanime_slug}/"
    headers = {"User-Agent": "Mozilla/5.0"}
    try:
        resp = requests.get(nineanime_url, headers=headers, timeout=10)
        resp.raise_for_status()
        return extract_nonce_from_html(resp.text)
    except requests.exceptions.HTTPError:
        raise HTTPException(status_code=404, detail=f"9anime page not found at: {nineanime_url}")
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=502, detail=f"Failed to fetch 9anime page for nonce: {e}")


def get_download_link(jikan_id: str, episode: int, dub: bool, quality: 'Quality') -> str:
    """
    Fetches the final, direct download link by getting a nonce and
    using the site's internal AJAX endpoint.
    """
    nonce = get_nonce_from_9anime(jikan_id, episode, dub)
    ajax_params = {
        "action": "fetch_download_links",
        "mal_id": jikan_id,  # NOTE(review): endpoint takes the Jikan/MAL id under this key
        "ep": episode,
        "nonce": nonce,
    }
    try:
        resp = requests.get(
            "https://9anime.org.lv/wp-admin/admin-ajax.php",
            params=ajax_params,
            headers={"User-Agent": "Mozilla/5.0"},
            timeout=10,
        )
        resp.raise_for_status()
        data = resp.json()
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=502, detail=f"Failed to fetch download links via AJAX: {e}")

    # Check the status inside the JSON payload
    response_data = data.get("data", data)  # Handle both {data: {status...}} and {status...}
    status = response_data.get("status")

    if status == 200:
        html_content = response_data.get("result")
        soup = BeautifulSoup(html_content, "html.parser")
        section_text = "Dub" if dub else "Sub"
        section_heading = soup.find("div", string=section_text)
        if not section_heading:
            other_section = "Sub" if dub else "Dub"
            hint = (
                f" The '{other_section}' version might be available."
                if soup.find("div", string=other_section)
                else ""
            )
            raise HTTPException(status_code=404, detail=f"'{section_text}' download section not found.{hint}")
        links_container = section_heading.find_next_sibling("div")
        if not links_container:
            raise HTTPException(status_code=404, detail=f"Could not find link container for '{section_text}'.")
        quality_link_tag = links_container.find(
            "a", string=lambda t: t and t.strip().lower() == quality.value.lower()
        )
        if not quality_link_tag or not quality_link_tag.has_attr('href'):
            available_links = [a.get_text(strip=True) for a in links_container.find_all('a')]
            detail_msg = (
                f"Download link for quality '{quality.value}' not found in '{section_text}' section."
            )
            if available_links:
                detail_msg += f" Available qualities: {', '.join(available_links)}"
            raise HTTPException(status_code=404, detail=detail_msg)
        initial_url = quality_link_tag['href']
        return resolve_redirect_url(initial_url)
    elif status == 500:
        raise HTTPException(status_code=404, detail="No download links available yet for this episode.")
    else:
        error_message = response_data.get(
            "result", "An unknown error occurred from the download link provider."
        )
        raise HTTPException(status_code=500, detail=error_message)


# --- FastAPI Endpoints ---

# This endpoint is for the iframe player, it uses a different logic
def find_9anime_iframe_src(nineanime_url: str) -> str:
    """Scrape the embedded player iframe URL from a 9anime episode page."""
    headers = {"User-Agent": "Mozilla/5.0"}
    resp = requests.get(nineanime_url, headers=headers, timeout=10)
    if resp.status_code != 200:
        raise HTTPException(status_code=404, detail=f"9Anime page not found: {nineanime_url}")
    soup = BeautifulSoup(resp.text, "html.parser")
    embed_div = soup.find("div", id="embed_holder")
    if not embed_div:
        # NOTE(review): the original error string was truncated by markup stripping;
        # reconstructed message — confirm exact wording.
        raise HTTPException(status_code=500, detail=f"No embed_holder div found on {nineanime_url}")
    iframe_tag = embed_div.find("iframe", src=True)
    if iframe_tag:
        return iframe_tag["src"]
    # Fallback: the iframe may be injected by an inline script; pull src out of it.
    script_tag = embed_div.find("script")
    if script_tag and script_tag.string:
        # NOTE(review): the original regex lost its "<iframe[^>" prefix to markup
        # stripping; reconstructed to match an iframe tag's src attribute.
        match = re.search(r"<iframe[^>]*\s+src=[\"']([^\"']+)[\"']", script_tag.string)
        if match:
            return match.group(1)
    raise HTTPException(status_code=500, detail=f"Could not extract iframe src from 9Anime page: {nineanime_url}")


def generate_iframe_src(jikan_id: str, episode: int, dub: bool = False) -> str:
    """Build the 9anime episode URL from the Jikan title and scrape its player iframe src."""
    title = get_anime_title(jikan_id)
    base_slug = slugify_title_for_9anime(title)
    slug_suffix = f"-dub-episode-{episode}" if dub else f"-episode-{episode}"
    nineanime_slug = f"{base_slug}{slug_suffix}"
    nineanime_url = f"https://9anime.org.lv/{nineanime_slug}/"
    return find_9anime_iframe_src(nineanime_url)


@app.get("/iframe-src")
def iframe_src_endpoint(
    id: str = Query(..., description="Jikan anime ID"),
    episode: int = Query(...),
    dub: bool = Query(False),
):
    """Returns JSON: { "src": "<iframe-url>" } for embedding a video player."""
    return {"src": generate_iframe_src(id, episode, dub)}


@app.get("/", response_class=HTMLResponse)
def full_player_page_endpoint(
    id: str = Query(..., description="Jikan anime ID"),
    episode: int = Query(...),
    dub: bool = Query(False),
):
    """Returns a full HTML page with the embedded video player."""
    iframe_src = generate_iframe_src(id, episode, dub)
    title_prefix = 'Dub ' if dub else ''
    # NOTE(review): the original HTML template was stripped of all tags;
    # reconstructed as a minimal full-screen player page.
    html_content = f"""<!DOCTYPE html>
<html>
<head>
    <title>{title_prefix}Episode {episode} Player</title>
    <style>
        html, body {{ margin: 0; height: 100%; background: #000; }}
        iframe {{ width: 100%; height: 100%; border: none; }}
    </style>
</head>
<body>
    <iframe src="{iframe_src}" allowfullscreen></iframe>
</body>
</html>"""
    return HTMLResponse(content=html_content, status_code=200)


# --- NEW, IMPROVED DOWNLOAD ENDPOINT ---

class Quality(str, Enum):
    """Enum for allowed video qualities."""
    p360 = "360p"
    p720 = "720p"
    p1080 = "1080p"


@app.get("/download-link")
def download_link_endpoint(
    id: str = Query(..., description="Jikan anime ID"),
    episode: int = Query(..., description="Episode number"),
    dub: bool = Query(False, description="Whether to use dubbed version"),
    quality: Quality = Query(..., description="Desired video quality."),
):
    """
    Returns a JSON object with a direct download link for the specified episode.
    This uses a reliable internal API method with a security nonce.
    """
    try:
        final_url = get_download_link(id, episode, dub, quality)
        return {"download_url": final_url}
    except HTTPException as e:
        # Re-raise exceptions we've already formatted for the API
        raise e
    except Exception as e:
        # Catch any other unexpected errors and format them
        raise HTTPException(status_code=500, detail=f"An unexpected internal error occurred: {e}")


@app.get("/download", response_class=HTMLResponse)
def download_page(
    request: Request,
    id: str = Query(..., description="Jikan anime ID"),
    episode: int = Query(..., description="Episode number"),
    dub: bool = Query(False, description="Dubbed version?"),
    quality: Quality = Query(..., description="Desired video quality"),
):
    """Serve a small HTML page that resolves the download link client-side and redirects to it."""
    # NOTE(review): the original HTML template was stripped of all tags; reconstructed
    # from the surviving text ("Download Episode", "Preparing your download...") as a
    # page that calls /download-link and redirects the browser to the resolved URL.
    html = f"""<!DOCTYPE html>
<html>
<head>
    <title>Download Episode {episode}</title>
</head>
<body>
    <h1>Download Episode {episode}</h1>
    <p id="status">Preparing your download...</p>
    <script>
        fetch("/download-link?id={id}&episode={episode}&dub={str(dub).lower()}&quality={quality.value}")
            .then(r => r.json())
            .then(data => {{
                if (data.download_url) {{
                    window.location.href = data.download_url;
                }} else {{
                    document.getElementById("status").textContent =
                        data.detail || "Failed to get download link.";
                }}
            }})
            .catch(() => {{
                document.getElementById("status").textContent = "Error preparing download.";
            }});
    </script>
</body>
</html>"""
    return HTMLResponse(content=html)