Spaces:
Running
Running
#!/usr/bin/env python | |
# -*- coding: utf-8 -*- | |
# Copyright 2020-2023 (c) Randy W @xtdevs, @xtsea | |
# | |
# from : https://github.com/TeamKillerX | |
# Channel : @RendyProjects | |
# This program is free software: you can redistribute it and/or modify | |
# it under the terms of the GNU Affero General Public License as published by | |
# the Free Software Foundation, either version 3 of the License, or | |
# (at your option) any later version. | |
# | |
# This program is distributed in the hope that it will be useful, | |
# but WITHOUT ANY WARRANTY; without even the implied warranty of | |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
# GNU Affero General Public License for more details. | |
# | |
# You should have received a copy of the GNU Affero General Public License | |
# along with this program. If not, see <https://www.gnu.org/licenses/>. | |
import json | |
import re | |
import os | |
import io | |
import requests | |
import base64 | |
import uvicorn | |
import shutil | |
import random | |
import tempfile | |
import string | |
import openai | |
import uuid | |
import time | |
import hashlib | |
import httpx | |
import asyncio | |
from io import BytesIO | |
from datetime import datetime as dt | |
from dotenv import load_dotenv | |
from bs4 import BeautifulSoup | |
from PIL import Image, ImageEnhance | |
from typing import * | |
from typing_extensions import Annotated | |
from typing import Annotated, Union | |
from typing import Optional, List, Dict, Any | |
from pydantic import BaseModel, EmailStr | |
from base64 import b64decode as kc | |
from base64 import b64decode | |
from random import choice | |
from gpytranslate import SyncTranslator | |
from httpx import AsyncClient | |
from telegraph import Telegraph, upload_file | |
from pathlib import Path | |
from serpapi import GoogleSearch | |
from fastapi import FastAPI, UploadFile, File, Response, Request | |
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm | |
from fastapi.security import APIKeyHeader | |
from fastapi import Depends, HTTPException, status | |
from fastapi.openapi.utils import get_openapi | |
from fastapi.responses import StreamingResponse, JSONResponse, FileResponse | |
from fastapi.responses import * | |
from fastapi import Request, Header | |
from fastapi import Body, Query | |
from fastapi.staticfiles import StaticFiles | |
from fastapi.templating import Jinja2Templates | |
from fastapi import FastAPI, HTTPException | |
from cryptography.fernet import Fernet | |
from motor.motor_asyncio import AsyncIOMotorClient | |
from datetime import datetime, timedelta | |
from starlette.responses import RedirectResponse | |
from starlette.staticfiles import StaticFiles | |
from starlette.middleware.sessions import SessionMiddleware | |
from starlette.middleware.cors import CORSMiddleware | |
import g4f | |
from g4f.client import Client | |
from g4f.client import Client as BingClient | |
from g4f.cookies import set_cookies | |
from g4f.Provider import BingCreateImages, OpenaiChat, Gemini | |
from pymongo import MongoClient | |
from RyuzakiLib.hackertools.chatgpt import RendyDevChat | |
from RyuzakiLib.hackertools.gemini import GeminiLatest | |
from RyuzakiLib.hackertools.openai_api import OpenAI | |
from RyuzakiLib.hackertools.huggingface import FaceAI | |
from RyuzakiLib import AsyicXSearcher | |
from RyuzakiLib.mental import BadWordsList | |
from RyuzakiLib.system import System | |
from bardapi import Bard | |
from models import * | |
from template import send_blackbox_chat | |
from gpytranslate import SyncTranslator | |
import logging | |
import functions as code | |
from fluxai import router as fluxai_router | |
from whisper import router as whisper_router | |
from instagram import router as instagram_router | |
from driver import YoutubeDriver | |
from yt_dlp import YoutubeDL | |
from xnxx_api import search_filters | |
from xnxx_api.xnxx_api import Client as xnxx_client | |
# Root logging configuration.  The original called basicConfig() twice
# (ERROR, then INFO); logging.basicConfig is a no-op once the root logger
# has handlers, so the INFO call never took effect.  The effective ERROR
# level is kept, now as a single explicit call.
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)
# Load variables from a local .env file before the os.environ reads below.
load_dotenv()
# ---------------------------------------------------------------------------
# Runtime configuration.  All variables are mandatory: a missing one raises
# KeyError at import time (intentional fail-fast behaviour).
# ---------------------------------------------------------------------------
# Database url
MONGO_URL = os.environ["MONGO_URL"]
# Upstream source URLs used by the various proxy endpoints.
SOURCE_UNSPLASH_URL = os.environ["SOURCE_UNSPLASH_URL"]
SOURCE_OCR_URL = os.environ["SOURCE_OCR_URL"]
SOURCE_ALPHA_URL = os.environ["SOURCE_ALPHA_URL"]
SOURCE_OPENDALLE_URL = os.environ["SOURCE_OPENDALLE_URL"]
SOURCE_DALLE3XL_URL = os.environ["SOURCE_DALLE3XL_URL"]
SOURCE_ANIME_STYLED_URL = os.environ["SOURCE_ANIME_STYLED_URL"]
SOURCE_WAIFU_URL = os.environ["SOURCE_WAIFU_URL"]
SOURCE_TIKTOK_WTF_URL = os.environ["SOURCE_TIKTOK_WTF_URL"]
SOURCE_TIKTOK_TECH_URL = os.environ["SOURCE_TIKTOK_TECH_URL"]
SOURCE_WHAT_GAY_URL = os.environ["SOURCE_WHAT_GAY_URL"]
SOURCE_ASSISTANT_GOOGLE_AI = os.environ["SOURCE_ASSISTANT_GOOGLE_AI"]
SOURCE_MONITOR_URL = os.environ["SOURCE_MONITOR_URL"]
SOURCE_OPENAI_ACCESS_URL = os.environ["SOURCE_OPENAI_ACCESS_URL"]
SOURCE_PICSART_URL = os.environ["SOURCE_PICSART_URL"]
# Third-party API credentials.
REVERSE_IMAGE_API = os.environ["REVERSE_IMAGE_API"]
OCR_API_KEY = os.environ["OCR_API_KEY"]
ONLY_DEVELOPER_API_KEYS = os.environ["ONLY_DEVELOPER_API_KEYS"]
HUGGING_TOKEN = os.environ["HUGGING_TOKEN"]
ASSISTANT_GOOGLE_API_KEYS = os.environ["ASSISTANT_GOOGLE_API_KEYS"]
COOKIE_BARD_TOKEN = os.environ["COOKIE_BARD_TOKEN"]
MONITOR_API_KEYS = os.environ["MONITOR_API_KEYS"]
PICSART_API_KEY = os.environ["PICSART_API_KEY"]
# Mongo connection: one database ('tiktokbot') and a single shared
# collection ('users') used by every helper in this module.
client = AsyncIOMotorClient(MONGO_URL)
db = client['tiktokbot']
collection = db['users']
# Synchronous Google-translate helper.
trans = SyncTranslator()
# FastAPI application with the built-in docs pages disabled.
app = FastAPI(docs_url=None, redoc_url=None)
# Static assets (landing page, etc.).
app.mount("/static", StaticFiles(directory="static"), name="static")
# Sub-routers contributed by sibling modules, all under /api/v1.
app.include_router(fluxai_router, prefix="/api/v1")
app.include_router(whisper_router, prefix="/api/v1")
app.include_router(instagram_router, prefix="/api/v1")
# NOTE(review): wildcard origins combined with allow_credentials=True is an
# insecure CORS configuration -- confirm credentialed cross-origin requests
# are really required.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
async def read_index():
    """Serve the static landing page as HTML.

    NOTE(review): no route decorator is visible here -- confirm this handler
    is registered elsewhere (e.g. via ``app.add_api_route``).
    """
    # Explicit encoding so the page renders identically regardless of the
    # container's locale settings.
    with open("static/index.html", encoding="utf-8") as f:
        return HTMLResponse(content=f.read(), status_code=200)
# Default timeout (seconds) intended for outbound requests made by handlers.
timeout = 100
# Canned user-facing message shown when the upstream AI service is offline.
contact_support = """
We are aware that AI is currently offline. This seems to be caused by the API
We are investigating and doing our best to get things back online as soon as possible. Thank you for your patience
~ Contact Support @xtdevs
"""
# Canned user-facing message for generic upstream failures.
internal_error = """
There has been an Internal error. We are aware of this error and notice that it can be
caused by your search terms being to explict, too confusing, or it can be caused by the API.
Please modify your search terms and/or try again later thank you for your understanding.
~ @xtdevs Team
"""
# Directory where user uploads are persisted (served back by serve_file).
UPLOAD_DIRECTORY = "./uploads"
class YouTubeBase(BaseModel):
    """Request body for the YouTube download endpoint."""
    link: str
    # When True, download audio only (mp3) instead of video.
    only_audio: bool = False
class YouTubeSearch(BaseModel):
    """Request body for the YouTube search endpoint."""
    query: str
    # Maximum number of search results to return.
    limit: int = 7
class FedBans(BaseModel):
    """Request body for recording a federation ban."""
    user_id: int
    hashtag: str
    reason: str
class GetsaFedBans(BaseModel):
    """Request body for looking up a federation ban."""
    user_id: int
async def get_all_api_keys():
    """Collect every non-empty ``ryuzaki_api_key`` from the users collection.

    Returns:
        list: all registered API keys.
    """
    api_keys = []
    # Motor's find() returns an async cursor that must be iterated with
    # ``async for`` -- awaiting it directly (as the original did) raises
    # TypeError because the cursor is not awaitable.
    async for doc in collection.find({}):
        api_key = doc.get("ryuzaki_api_key")
        if api_key:
            api_keys.append(api_key)
    return api_keys
async def get_all_fedbans_api_keys():
    """Collect every non-empty ``fedbans_api_key`` from the users collection.

    Returns:
        list: all registered fedban API keys.
    """
    api_keys = []
    # Motor's find() returns an async cursor; ``await collection.find({})``
    # raises TypeError, so iterate with ``async for`` instead.
    async for doc in collection.find({}):
        api_key = doc.get("fedbans_api_key")
        if api_key:
            api_keys.append(api_key)
    return api_keys
async def validate_api_key(api_key: str = Header(...)):
    """FastAPI dependency: 401 unless the ``api_key`` header is a registered key."""
    registered_keys = await get_all_api_keys()
    if api_key not in registered_keys:
        raise HTTPException(status_code=401, detail="Invalid API key")
def validate_api_key_only_devs(api_key: str = Header(...)):
    """FastAPI dependency: allow only developer API keys.

    ``ONLY_DEVELOPER_API_KEYS`` is a raw string read from the environment; the
    original membership test was therefore a *substring* check, so any key
    that happened to be a fragment of the configured value (even a single
    character) was accepted.  The variable is now treated as a comma-separated
    list and an exact match is required.

    Raises:
        HTTPException: 401 when the key is not an exact configured key.
    """
    allowed = {k.strip() for k in ONLY_DEVELOPER_API_KEYS.split(",") if k.strip()}
    if api_key not in allowed:
        raise HTTPException(status_code=401, detail="Invalid API key")
async def validate_api_key_fedbans(api_key: str = Header(...)):
    """FastAPI dependency: 401 unless the header matches a registered fedban key."""
    registered_keys = await get_all_fedbans_api_keys()
    if api_key not in registered_keys:
        raise HTTPException(status_code=401, detail="Invalid API key")
def secs_to_mins(secs: int) -> str:
    """Convert a duration in seconds to an ``M:SS`` string.

    The seconds part is now zero-padded: 65 -> ``"1:05"`` instead of the
    original's ambiguous ``"1:5"``.
    """
    mins, secs = divmod(secs, 60)
    return f"{mins}:{secs:02d}"
# Header extractor; auto_error=False so a missing header reaches the
# dependency below, which raises its own 403 instead of FastAPI's default.
api_key_header = APIKeyHeader(name="api_key", auto_error=False)
async def validate_api_key_porno(api_key: Optional[str] = Depends(api_key_header)) -> str:
    """FastAPI dependency: require an ``api_key`` header naming a non-blocked user.

    Returns:
        str: the validated API key.

    Raises:
        HTTPException: 403 when the header is absent or the key is unknown/blocked.
    """
    if api_key is None:
        raise HTTPException(status_code=403, detail="API key required")
    # The key doubles as the "username" field written by register_user.
    user = await collection.find_one({"username": api_key, "blocked": False})
    if not user:
        raise HTTPException(status_code=403, detail="Invalid or blocked API key")
    return api_key
class RegistrationModel(BaseModel):
    """Payload for registering a new API key."""
    gmail: EmailStr
    api_key: str
    # How many days the key stays valid (see register_user).
    expiration_days: int = 3
# Fernet key for encrypting stored API keys.  Prefer a stable key from the
# environment so ciphertexts stored in Mongo survive restarts; fall back to
# generating a fresh key (the original behaviour, which made previously
# encrypted values undecryptable after every restart).
key = os.environ.get("FERNET_KEY") or Fernet.generate_key()
cipher_suite = Fernet(key)
def encrypt_key(api_key: str) -> str:
    """Encrypt *api_key* with the process-wide Fernet cipher.

    Returns:
        str: the Fernet token decoded to text for storage.
    """
    encrypted_key = cipher_suite.encrypt(api_key.encode())
    return encrypted_key.decode()
def status():
    """Liveness-probe payload.

    NOTE(review): this function shadows ``status`` imported from fastapi at
    the top of the file.
    """
    return dict(message="running")
async def some_endpoint(api_key: str = Depends(validate_api_key_porno)):
    """Example protected endpoint; reaching the body means the key was accepted."""
    return dict(message="Access granted")
async def register_user(data: RegistrationModel):
    """Persist a new API-key registration with an expiry window.

    The key is stored twice: in plaintext as the ``username`` lookup field and
    Fernet-encrypted under ``key``.

    Raises:
        HTTPException: 500 when the insert yields no document id.
    """
    now = datetime.utcnow()
    registration_doc = {
        "username": data.api_key,
        "gmail": data.gmail,
        "key": encrypt_key(data.api_key),
        "blocked": False,
        "created_at": now,
        "expires_at": now + timedelta(days=data.expiration_days),
    }
    outcome = await collection.insert_one(registration_doc)
    if not outcome.inserted_id:
        raise HTTPException(status_code=500, detail="Registration failed")
    return {"message": f"User {data.api_key} registered successfully"}
async def youtube_search(payload: YouTubeSearch):
    """Search YouTube via YoutubeDriver and return results formatted as Markdown."""
    try:
        results = YoutubeDriver(payload.query, payload.limit).to_dict()
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={"error": str(e)}
        )
    if not results:
        return SuccessResponse(
            status="False",
            randydev={"error": "No results found."}
        )
    # One Markdown block per result; titles truncated to 50 characters.
    text = f"**🔎 𝖳𝗈𝗍𝖺𝗅 𝖱𝖾𝗌𝗎𝗅𝗍𝗌 𝖥𝗈𝗎𝗇𝖽:** `{len(results)}`\n\n"
    for result in results:
        text += f"**𝖳𝗂𝗍𝗅𝖾:** `{result['title'][:50]}`\n**𝖢𝗁𝖺𝗇𝗇𝖾𝗅:** `{result['channel']}`\n**𝖵𝗂𝖾𝗐𝗌:** `{result['views']}`\n**𝖣𝗎𝗋𝖺𝗍𝗂𝗈𝗇:** `{result['duration']}`\n**𝖫𝗂𝗇𝗄:** `https://youtube.com{result['url_suffix']}`\n\n"
    return SuccessResponse(
        status="True",
        randydev={"results": text}
    )
async def youtube_api(payload: YouTubeBase):
    """Download a YouTube video (or audio-only mp3) and return it base64-encoded.

    The media bytes are embedded directly in the JSON response, so large
    videos produce very large payloads.
    """
    # check_url returns (ok, url-or-error-message).
    status, url = YoutubeDriver.check_url(payload.link)
    if not status:
        return SuccessResponse(
            status="False",
            randydev={"error": url}
        )
    try:
        time_second = time.time()
        if payload.only_audio:
            with YoutubeDL(YoutubeDriver.song_options()) as ytdl:
                yt_data = ytdl.extract_info(url, download=True)
                # yt-dlp writes .webm; the final file is renamed to .mp3.
                yt_file = ytdl.prepare_filename(yt_data).replace('.webm', '.mp3')
                # NOTE(review): extract_info(download=True) already downloads;
                # this extra process_info call looks redundant -- confirm.
                ytdl.process_info(yt_data)
            with open(yt_file, "rb") as audio_file:
                encoded_string = base64.b64encode(audio_file.read()).decode('utf-8')
            # Remove the local file once its bytes are captured.
            os.remove(yt_file)
            return SuccessResponse(
                status="True",
                randydev={
                    "audio_data": encoded_string,
                    "channel": yt_data['channel'],
                    "title": yt_data['title'],
                    "views": yt_data['view_count'],
                    "duration": secs_to_mins(int(yt_data['duration'])),
                    "thumbnail": f"https://i.ytimg.com/vi/{yt_data['id']}/hqdefault.jpg",
                    "time_second": time_second
                }
            )
        else:
            with YoutubeDL(YoutubeDriver.video_options()) as ytdl:
                yt_data = ytdl.extract_info(url, download=True)
                yt_file = ytdl.prepare_filename(yt_data).replace('.webm', '.mp4')
            with open(yt_file, "rb") as video_file:
                encoded_string = base64.b64encode(video_file.read()).decode('utf-8')
            os.remove(yt_file)
            return SuccessResponse(
                status="True",
                randydev={
                    "video_data": encoded_string,
                    "channel": yt_data['channel'],
                    "title": yt_data['title'],
                    "views": yt_data['view_count'],
                    "duration": secs_to_mins(int(yt_data['duration'])),
                    "thumbnail": f"https://i.ytimg.com/vi/{yt_data['id']}/hqdefault.jpg",
                    "time_second": time_second
                }
            )
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={"error": f"An error occurred: {str(e)}"}
        )
async def tebakgambar():
    """Scrape a random picture-quiz entry (image URL + answer) from jawabantebakgambar.net.

    Returns:
        dict | None: {"message", "image", "jawaban"} on success, None on any failure.
    """
    try:
        response = requests.get('https://jawabantebakgambar.net/all-answers/')
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        # The site lists answers in li 2..2836; pick one at random.
        random_num = random.randint(2, 2836)
        link2 = 'https://jawabantebakgambar.net'
        selected_item = soup.select_one(f'#images > li:nth-child({random_num}) > a')
        if selected_item:
            # Images are lazy-loaded, so the real URL is in data-src.
            img = link2 + selected_item.find('img').get('data-src')
            jwb = selected_item.find('img').get('alt')
            result = {
                "message": "By Randydev",
                "image": img,
                "jawaban": jwb
            }
            return result
        else:
            raise ValueError("Selected item not found.")
    except Exception:
        # Best-effort scraper: all failures collapse to None for the caller.
        return None
async def fbdown_hack(link):
    """Resolve a Facebook video *link* into direct download URLs via getfvid.com.

    Returns:
        dict | None: {"normal_video", "HD", "audio"} or None on any failure.

    NOTE(review): the hard-coded session cookie below will eventually expire,
    at which point this scraper silently starts returning None.
    """
    try:
        data = {"url": link}
        headers = {
            "content-type": "application/x-www-form-urlencoded",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
            "cookie": "_ga=GA1.2.1310699039.1624884412; _pbjs_userid_consent_data=3524755945110770; cto_bidid=rQH5Tl9NNm5IWFZsem00SVVuZGpEd21sWnp0WmhUeTZpRXdkWlRUOSUyQkYlMkJQQnJRSHVPZ3Fhb1R2UUFiTWJuVGlhVkN1TGM2anhDT1M1Qk0ydHlBb21LJTJGNkdCOWtZalRtZFlxJTJGa3FVTG1TaHlzdDRvJTNE; cto_bundle=g1Ka319NaThuSmh6UklyWm5vV2pkb3NYaUZMeWlHVUtDbVBmeldhNm5qVGVwWnJzSUElMkJXVDdORmU5VElvV2pXUTJhQ3owVWI5enE1WjJ4ZHR5NDZqd1hCZnVHVGZmOEd0eURzcSUyQkNDcHZsR0xJcTZaRFZEMDkzUk1xSmhYMlY0TTdUY0hpZm9NTk5GYXVxWjBJZTR0dE9rQmZ3JTNEJTNE; _gid=GA1.2.908874955.1625126838; __gads=ID=5be9d413ff899546-22e04a9e18ca0046:T=1625126836:RT=1625126836:S=ALNI_Ma0axY94aSdwMIg95hxZVZ-JGNT2w; cookieconsent_status=dismiss"
        }
        response = requests.post('https://www.getfvid.com/downloader', data=data, headers=headers)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        # The download page lists HD (1st), normal (2nd) and audio (3rd) links.
        normal_video = soup.select_one('div.col-md-4.btns-download > p:nth-child(2) > a').get('href')
        hd_video = soup.select_one('div.col-md-4.btns-download > p:nth-child(1) > a').get('href')
        audio = soup.select_one('div.col-md-4.btns-download > p:nth-child(3) > a').get('href')
        return {
            "normal_video": normal_video,
            "HD": hd_video,
            "audio": audio
        }
    except Exception:
        # Best-effort scraper: collapse all failures to None.
        return None
async def HentaiAnime():
    """Scrape a random listing page of sfmcompile.club.

    Returns:
        list | dict | None: a list of entry dicts, an error dict when the page
        has no articles, or None on any exception.
    """
    try:
        # Random page within the site's current pagination range.
        page = random.randint(1, 1153)
        response = requests.get(f'https://sfmcompile.club/page/{page}')
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        hasil = []
        articles = soup.select('#primary > div > div > ul > li > article')
        for article in articles:
            title = article.select_one('header > h2').text
            link = article.select_one('header > h2 > a')['href']
            category = article.select_one('header > div.entry-before-title > span > span').text.replace('in ', '')
            share_count = article.select_one('header > div.entry-after-title > p > span.entry-shares').text
            views_count = article.select_one('header > div.entry-after-title > p > span.entry-views').text
            # Entries with a <source> tag are videos; otherwise fall back to the image.
            type_ = article.select_one('source')['type'] if article.select_one('source') else 'image/jpeg'
            video_1 = article.select_one('source')['src'] if article.select_one('source') else article.select_one('img')['data-src']
            video_2 = article.select_one('video > a')['href'] if article.select_one('video > a') else ''
            hasil.append({
                "title": title,
                "link": link,
                "category": category,
                "share_count": share_count,
                "views_count": views_count,
                "type": type_,
                "video_1": video_1,
                "video_2": video_2
            })
        if not hasil:
            return {'developer': '@xtdevs', 'error': 'no result found'}
        return hasil
    except Exception:
        # Best-effort scraper: collapse all failures to None.
        return None
async def hapyymod(q):
    """Search happymod.com for *q* and scrape the result cards.

    NOTE(review): the misspelled name ("hapyymod") is kept because callers in
    this module reference it.
    """
    host = "https://happymod.com"
    page = requests.get(f"{host}/search.html?q={q}")
    if page.status_code != 200:
        return {"status": page.status_code, "result": []}
    dom = BeautifulSoup(page.text, 'html.parser')
    cards = dom.select("body > div.container-row.clearfix.container-wrap > div.container-left > section > div.pdt-app-box")
    if not cards:
        return {"status": 404, "result": []}
    entries = []
    for card in cards:
        link = host + card.find("a")["href"]
        title = card.find("a")["title"]
        thumb = card.find("img")["data-original"]
        entries.append({"title": title, "link": link, "thumb": thumb})
    return {"status": 200, "creator": "@xtdevs", "result": entries}
def _scraper_porn(url):
    """Extract the ordered <meta> contents from a pornwhite player block.

    The mapping is positional (title at index 0, thumbnail at 2, ...); any
    missing position yields None.
    """
    page = requests.get(url)
    page.raise_for_status()
    dom = BeautifulSoup(page.text, "html.parser")
    contents = [m.get("content") for m in dom.select(".player meta") if m.get("content")]

    def _at(i):
        # Positional layout mirrors the page's meta tag order.
        return contents[i] if len(contents) > i else None

    return {
        "title": _at(0),
        "thumbnail": _at(2),
        "video": _at(4),
        "date": _at(5),
        "url": _at(6),
    }
def _search_porn(query):
    """Return every video-page link found on a pornwhite search results page."""
    page = requests.get(f"https://www.pornwhite.com/search/?q={query}")
    page.raise_for_status()
    dom = BeautifulSoup(page.text, "html.parser")
    prefix = "https://www.pornwhite.com/videos/"
    return [
        anchor.get("href")
        for anchor in dom.select("a")
        if anchor.get("href") and anchor.get("href").startswith(prefix)
    ]
def search_pornwhite_videos(query):
    """Search pornwhite for *query* and scrape metadata for every result link."""
    return [_scraper_porn(link) for link in _search_porn(query)]
async def search_porn_images(query):
    """Scrape thumbnail URLs from pornpics.com for *query*; [] on request errors."""
    try:
        page = requests.get(f"https://www.pornpics.com/{query}/")
        page.raise_for_status()
        dom = BeautifulSoup(page.text, "html.parser")
        # Thumbnails are lazy-loaded: the real URL lives in data-src.
        return [
            img.get("data-src")
            for img in dom.select(".rel-link img")
            if img.get("data-src")
        ]
    except requests.exceptions.RequestException:
        return []
async def pinterest_api_v2(search):
    """Scrape Pinterest search results for original-size image URLs.

    Returns:
        dict | SuccessResponse: {"count", "data"} on success; a SuccessResponse
        error object when *search* is empty or the request fails.

    NOTE(review): the hard-coded session cookie below will eventually expire.
    """
    if not search:
        return SuccessResponse(
            status="False",
            randydev={"error": "pinterest API"}
        )
    headers = {
        'authority': 'www.pinterest.com',
        'cache-control': 'max-age=0',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',
        'sec-gpc': '1',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-mode': 'same-origin',
        'sec-fetch-dest': 'empty',
        'accept-language': 'en-US,en;q=0.9',
        'cookie': 'csrftoken=92c7c57416496066c4cd5a47a2448e28; g_state={"i_l":0}; _auth=1; _pinterest_sess=TWc9PSZBMEhrWHJZbHhCVW1OSzE1MW0zSkVid1o4Uk1laXRzdmNwYll3eEFQV0lDSGNRaDBPTGNNUk5JQTBhczFOM0ZJZ1ZJbEpQYlIyUmFkNzlBV2kyaDRiWTI4THFVUWhpNUpRYjR4M2dxblJCRFhESlBIaGMwbjFQWFc2NHRtL3RUcTZna1c3K0VjVTgyejFDa1VqdXQ2ZEQ3NG91L1JTRHZwZHNIcDZraEp1L0lCbkJWUytvRis2ckdrVlNTVytzOFp3ZlpTdWtCOURnbGc3SHhQOWJPTzArY3BhMVEwOTZDVzg5VDQ3S1NxYXZGUEEwOTZBR21LNC9VZXRFTkErYmtIOW9OOEU3ektvY3ZhU0hZWVcxS0VXT3dTaFpVWXNuOHhiQWdZdS9vY24wMnRvdjBGYWo4SDY3MEYwSEtBV2JxYisxMVVsV01McmpKY0VOQ3NYSUt2ZDJaWld6T0RacUd6WktITkRpZzRCaWlCTjRtVXNMcGZaNG9QcC80Ty9ZZWFjZkVGNURNZWVoNTY4elMyd2wySWhtdWFvS2dQcktqMmVUYmlNODBxT29XRWx5dWZSc1FDY0ZONlZJdE9yUGY5L0p3M1JXYkRTUDAralduQ2xxR3VTZzBveUc2Ykx3VW5CQ0FQeVo5VE8wTEVmamhwWkxwMy9SaTNlRUpoQmNQaHREbjMxRlRrOWtwTVI5MXl6cmN1K2NOTFNyU1cyMjREN1ZFSHpHY0ZCR1RocWRjVFZVWG9VcVpwbXNGdlptVzRUSkNadVc1TnlBTVNGQmFmUmtrNHNkVEhXZytLQjNUTURlZXBUMG9GZ3YwQnVNcERDak16Nlp0Tk13dmNsWG82U2xIKyt5WFhSMm1QUktYYmhYSDNhWnB3RWxTUUttQklEeGpCdE4wQlNNOVRzRXE2NkVjUDFKcndvUzNMM2pMT2dGM05WalV2QStmMC9iT055djFsYVBKZjRFTkRtMGZZcWFYSEYvNFJrYTZSbVRGOXVISER1blA5L2psdURIbkFxcTZLT3RGeGswSnRHdGNpN29KdGFlWUxtdHNpSjNXQVorTjR2NGVTZWkwPSZzd3cwOXZNV3VpZlprR0VBempKdjZqS00ybWM9; _b="AV+pPg4VpvlGtL+qN4q0j+vNT7JhUErvp+4TyMybo+d7CIZ9QFohXDj6+jQlg9uD6Zc="; _routing_id="d5da9818-8ce2-4424-ad1e-d55dfe1b9aed"; sessionFunnelEventLogged=1'
    }
    url = f'https://www.pinterest.com/search/pins/?q={search}&rs=typed&term_meta[]={search}|typed'
    try:
        response = requests.get(url, headers=headers)
        # Pull full-resolution image URLs straight out of the page source.
        arr_match = re.findall(r'https://i\.pinimg\.com/originals/[^.]+\.jpg', response.text)
        my_data = {
            "count": len(arr_match),
            "data": arr_match
        }
        return my_data
    except Exception as error:
        return SuccessResponse(
            status="False",
            randydev={"error": "An error occurred while fetching data"}
        )
def _get_image_md5_content(file_path: str) -> Tuple[str, bytes]: | |
with open(file_path, "rb") as fp: | |
content = fp.read() | |
image_md5 = base64.b64encode(hashlib.md5(content).digest()).decode("utf-8") | |
return image_md5, content | |
async def enhance_photo_and_remini(api_key: str, file_path: str):
    """Run the Remini enhancement pipeline on the image at *file_path*.

    Workflow: create a task, upload the image bytes to the pre-signed URL,
    trigger processing, then poll (up to 50 times, 2s apart) until the task
    reports ``completed`` and return its ``output_url``.

    Fixed: the original had an unreachable ``os.remove(file_path)`` after the
    return statement; cleanup of *file_path* is the caller's responsibility.

    NOTE(review): the ``assert`` status checks are stripped under ``python -O``;
    they are kept for behavioural compatibility with the original code.
    """
    CONTENT_TYPE = "image/jpeg"
    _TIMEOUT = 60
    _BASE_URL = "https://developer.remini.ai/api"
    image_md5, content = _get_image_md5_content(file_path)
    async with httpx.AsyncClient(
        base_url=_BASE_URL,
        headers={"Authorization": f"Bearer {api_key}"},
    ) as client:
        # 1) Create the enhancement task.
        response = await client.post(
            "/tasks",
            json={
                "tools": [
                    {"type": "face_enhance", "mode": "beautify"},
                    {"type": "background_enhance", "mode": "base"}
                ],
                "image_md5": image_md5,
                "image_content_type": CONTENT_TYPE
            }
        )
        assert response.status_code == 200
        body = response.json()
        task_id = body["task_id"]
        # 2) Upload the raw bytes to the pre-signed URL.
        response = await client.put(
            body["upload_url"],
            headers=body["upload_headers"],
            content=content,
            timeout=_TIMEOUT
        )
        assert response.status_code == 200
        # 3) Start processing.
        response = await client.post(f"/tasks/{task_id}/process")
        assert response.status_code == 202
        # 4) Poll until completed (max ~100 seconds).
        for _ in range(50):
            response = await client.get(f"/tasks/{task_id}")
            assert response.status_code == 200
            if response.json()["status"] == "completed":
                break
            await asyncio.sleep(2)
        return response.json()["result"]["output_url"]
async def remini(remini_api_key: str, file: UploadFile = File(...)):
    """Re-encode the upload as JPEG and run it through the Remini enhance API.

    Bug fixed: the original passed an in-memory ``BytesIO`` buffer to
    ``enhance_photo_and_remini``, whose helper calls ``open(file_path)`` and
    therefore requires a real filesystem path.  The JPEG is now written to a
    temporary file whose path is handed over and removed afterwards.
    """
    try:
        image = Image.open(io.BytesIO(await file.read()))
        tmp = tempfile.NamedTemporaryFile(suffix=".jpg", delete=False)
        try:
            image.save(tmp, format='JPEG')
        finally:
            tmp.close()
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={"error": f"Error fucking: {e}"}
        )
    try:
        response = await enhance_photo_and_remini(remini_api_key, tmp.name)
        return SuccessResponse(
            status="True",
            randydev={"output_url": response}
        )
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={"error": f"Error fucking: {e}"}
        )
    finally:
        # The temp file is no longer needed once the API call finishes.
        os.remove(tmp.name)
async def pinterestv2(query: str):
    """Wrap pinterest_api_v2 results in a SuccessResponse.

    The bare ``except:`` (which also swallowed SystemExit/KeyboardInterrupt)
    is narrowed to ``except Exception``.
    """
    try:
        response = await pinterest_api_v2(query)
        return SuccessResponse(
            status="True",
            randydev={"results": response}
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"error": "Error fucking"}
        )
async def pornpics(query: str):
    """Wrap search_porn_images results in a SuccessResponse.

    The bare ``except:`` is narrowed to ``except Exception`` so process-level
    signals (SystemExit, KeyboardInterrupt) are no longer swallowed.
    """
    try:
        response = await search_porn_images(query)
        return SuccessResponse(
            status="True",
            randydev={"results": response}
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"error": "Error fucking"}
        )
async def pornowhite_(query: str):
    """Wrap the synchronous pornwhite scraper in a SuccessResponse envelope."""
    try:
        scraped = search_pornwhite_videos(query)
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={"error": f"Error fucking: {e}"}
        )
    return SuccessResponse(
        status="True",
        randydev={"results": scraped}
    )
async def happymods_(q: str):
    """Wrap the happymod scraper in a SuccessResponse.

    The bare ``except:`` is narrowed to ``except Exception`` so process-level
    signals are no longer swallowed.
    """
    try:
        response = await hapyymod(q)
        return SuccessResponse(
            status="True",
            randydev={"results": response}
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"error": "Error fucking"}
        )
async def hentai_():
    """Wrap the HentaiAnime scraper in a SuccessResponse.

    The bare ``except:`` is narrowed to ``except Exception`` so process-level
    signals are no longer swallowed.
    """
    try:
        response = await HentaiAnime()
        return SuccessResponse(
            status="True",
            randydev={"results": response}
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"error": "Error fucking"}
        )
async def fbdown_(link: str):
    """Wrap the getfvid Facebook scraper in a SuccessResponse.

    The bare ``except:`` is narrowed to ``except Exception`` so process-level
    signals are no longer swallowed.
    """
    try:
        response = await fbdown_hack(link)
        return SuccessResponse(
            status="True",
            randydev={"results": response}
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"error": "Error fucking"}
        )
async def tebakgambar_answer():
    """Wrap the tebakgambar scraper in a SuccessResponse.

    The bare ``except:`` is narrowed to ``except Exception`` so process-level
    signals are no longer swallowed.
    """
    try:
        response = await tebakgambar()
        return SuccessResponse(
            status="True",
            randydev={"results": response}
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"error": "Error fucking"}
        )
async def toanimes_(file: UploadFile = File(...)):
    """Convert an uploaded photo to anime style via the betabotz tools API.

    Returns:
        SuccessResponse: base64 image data and size on success, an error
        envelope otherwise.
    """
    try:
        # Re-encode whatever was uploaded as JPEG before forwarding upstream.
        image = Image.open(io.BytesIO(await file.read()))
        buffer = io.BytesIO()
        image.save(buffer, format='JPEG')
        buffer.seek(0)
        files = {
            'image': ('toanime.jpg', buffer, 'image/jpeg')
        }
        headers = {"accept": "application/json"}
        response = requests.post("https://tools.betabotz.eu.org/ai/toanime", files=files, headers=headers)
        if response.status_code != 200:
            return SuccessResponse(
                status="False",
                randydev={"error": "Failed to process the image"}
            )
        data = response.json()
        res = {
            "image_data": data['result'],
            "image_size": data['size']
        }
        return SuccessResponse(
            status="True",
            randydev={"results": res}
        )
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={"error": f"Error during processing: {str(e)}"}
        )
async def api_tools_fix(name, ok, query):
    """Proxy a GET to the betabotz tools API and return the decoded JSON.

    *name* selects the tool endpoint, *ok* is the query-parameter name and
    *query* its value.
    """
    endpoint = f"https://tools.betabotz.eu.org/tools/{name}?{ok}={query}"
    return requests.get(endpoint).json()
async def facebookdl_(link: str):
    """Download Facebook video metadata via the betabotz tools proxy.

    The bare ``except:`` is narrowed to ``except Exception`` so process-level
    signals are no longer swallowed.
    """
    try:
        response = await api_tools_fix(name="facebookdl", ok="url", query=link)
        result = response.get("result", {})
        return SuccessResponse(
            status="True",
            randydev={"results": result}
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"error": "Error fucking"}
        )
async def tiktokdl_(link: str):
    """Download TikTok video metadata via the betabotz tools proxy.

    The bare ``except:`` is narrowed to ``except Exception`` so process-level
    signals are no longer swallowed.
    """
    try:
        response = await api_tools_fix(name="tiktokdl", ok="url", query=link)
        result = response.get("result", {})
        return SuccessResponse(
            status="True",
            randydev={"results": result}
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"error": "Error fucking"}
        )
async def xnxx_search(query: str, quality: str):
    """Search xnxx for *query* at the requested quality.

    Args:
        query: search terms.
        quality: "720p" or "1080p"; anything else yields an error response.
    """
    # Map the public quality strings onto the library's filter enums.
    data_dict = {
        "720p": search_filters.SearchingQuality.X_720p,
        "1080p": search_filters.SearchingQuality.X_1080p_plus
    }
    try:
        if quality not in data_dict:
            return SuccessResponse(
                status="False",
                randydev={"error": "Invalid quality"}
            )
        SearchingQuality = data_dict[quality]
        # Fixed filters: clips up to 10 minutes, uploaded within the last year.
        search = xnxx_client().search(
            query,
            length=search_filters.Length.X_0_10min,
            upload_time=search_filters.UploadTime.year,
            searching_quality=SearchingQuality,
            limit=1
        )
        response = search.videos
        results = []
        for x in response:
            results.append({
                "link": x.content_url,
                "title": x.title,
                "author": x.author,
                "length": x.length,
                "highest_quality": x.highest_quality,
                "publish_date": x.publish_date,
                "views": x.views,
                "thumb": x.thumbnail_url
            })
        return SuccessResponse(
            status="True",
            randydev={
                "results": results
            }
        )
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={"error": f"Error: {e}"}
        )
async def xnxx_download(link: str):
    """Resolve a single xnxx video *link* into its metadata fields.

    Bug fixed: ``length`` previously duplicated ``highest_quality`` instead of
    reporting the video's actual length (the search endpoint above uses
    ``x.length`` for the same field).
    """
    try:
        x = xnxx_client()
        response = x.get_video(link)
        return SuccessResponse(
            status="True",
            randydev={
                "title": response.title,
                "author": response.author,
                "length": response.length,
                "highest_quality": response.highest_quality,
                "publish_date": response.publish_date,
                "views": response.views,
                "link": response.content_url,
                "thumb": response.thumbnail_url
            }
        )
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={"error": f"Error fucking: {e}"}
        )
async def instagramdl(link: str):
    """Download Instagram media metadata via the betabotz tools proxy.

    Bug fixed: the parsed dict was bound to ``result`` but then read through
    the undefined name ``results``, raising NameError on every call so the
    endpoint always fell into the error branch.  The bare ``except:`` is also
    narrowed to ``except Exception``.
    """
    try:
        response = await api_tools_fix(name="instagramdl", ok="url", query=link)
        result = response.get("result", {})
        wm = result.get("wm").replace("Powered By Betabotz", "Powered By Randydev")
        thumbnail = result.get("thumbnail")
        _url = result.get("_url")
        return SuccessResponse(
            status="True",
            randydev={"wm": wm, "thumb": thumbnail, "url": _url}
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"error": "Error fucking"}
        )
async def upload_file(file: UploadFile = File(...)):
    """Persist an upload under a random name and return its public URL.

    NOTE(review): this handler shadows ``upload_file`` imported from
    ``telegraph`` at the top of the file.
    """
    try:
        suffix = file.filename.split(".")[-1]
        stored_name = f"{uuid.uuid4().hex}.{suffix}"
        destination = os.path.join(UPLOAD_DIRECTORY, stored_name)
        with open(destination, "wb") as out:
            out.write(await file.read())
        return JSONResponse(
            status_code=200,
            content={"url": f"https://randydev-ryuzaki-api.hf.space/uploads/{stored_name}"}
        )
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={"error": str(e)}
        )
async def serve_file(filename: str):
    """Serve a previously uploaded file from UPLOAD_DIRECTORY.

    Security fix: the raw *filename* was joined straight into the path, so a
    value such as ``../../etc/passwd`` could escape the upload directory.
    Only the basename is used now.
    """
    safe_name = os.path.basename(filename)
    file_location = os.path.join(UPLOAD_DIRECTORY, safe_name)
    if os.path.exists(file_location):
        return FileResponse(file_location)
    return JSONResponse(
        status_code=404,
        content={"error": "File not found"}
    )
# Pool of role labels handed out at random.  NOTE(review): the name typo
# ("RAMDOM") is kept because other code may reference it.
RAMDOM_STATUS = [
    "civilian",
    "wanted",
    "undercover",
    "rogue_agent",
    "innocent",
    "fugitive",
    "covert_operator",
    "spammer",
]
async def remove_sibyl_system_banned(user_id):
    """Clear every sibyl-ban field for *user_id* via a Mongo ``$unset``."""
    cleared_fields = {
        "sibyl_ban": None,
        "reason_sibyl": None,
        "is_banned_sibly": None,
        "date_joined_sib": None,
        "sibyl_userid": None,
    }
    return await collection.update_one(
        {"user_id": user_id}, {"$unset": cleared_fields}, upsert=True
    )
async def new_user_spammers(
    user_id,
    hashtag,
    reason,
    is_banned: bool,
    date_joined,
):
    """Upsert a federation-ban record for *user_id*."""
    ban_record = {
        "user_id": user_id,
        "hashtag": hashtag,
        "reason": reason,
        "date_joined": date_joined,
        "is_banned": is_banned,
    }
    return await collection.update_one(
        {"user_id": user_id}, {"$set": ban_record}, upsert=True
    )
async def new_sibyl_system_banned(user_id, name, reason, date_joined):
    """Upsert a sibyl-system ban record for *user_id*."""
    ban_record = {
        "sibyl_ban": name,
        "reason_sibyl": reason,
        "is_banned_sibly": True,
        "date_joined_sib": date_joined,
        "sibyl_userid": user_id,
    }
    return await collection.update_one(
        {"user_id": user_id}, {"$set": ban_record}, upsert=True
    )
async def cybersecuritydb(user_id, mongo_url):
    """Store the user's own MongoDB connection string on their record."""
    return await collection.update_one(
        {"user_id": user_id}, {"$set": {"mongodb": mongo_url}}, upsert=True
    )
async def get_sibyl_system_banned(user_id):
    """Return (name, reason, is_banned, date_joined, sibyl_user_id) or None if unknown."""
    record = await collection.find_one({"user_id": user_id})
    if not record:
        return None
    return (
        record.get("sibyl_ban"),
        record.get("reason_sibyl"),
        record.get("is_banned_sibly"),
        record.get("date_joined_sib"),
        record.get("sibyl_userid"),
    )
async def get_fedbans_(user_id):
    """Return [user_id, hashtag, reason, is_banned, date_joined] or None if absent."""
    doc = await collection.find_one({"user_id": user_id})
    if not doc:
        return None
    return [
        doc.get(field)
        for field in ("user_id", "hashtag", "reason", "is_banned", "date_joined")
    ]
async def get_all_banned():
    """Collect every Sibyl-banned user as {"user_id", "reason"} dicts.

    Bug fix: Motor's ``collection.find({})`` returns an AsyncIOMotorCursor,
    which is not awaitable — the original ``await`` raised a TypeError.
    The cursor must be consumed with ``async for``. The loop variable also
    no longer shadows the ``user_id`` it extracts.
    """
    banned_users = []
    async for doc in collection.find({}):
        banned_users.append({
            "user_id": doc.get("sibyl_userid"),
            "reason": doc.get("reason_sibyl"),
        })
    return banned_users
async def new_profile_clone(
    user_id,
    first_name,
    last_name=None,
    profile_id=None,
    bio=None
):
    """Upsert a cloned-profile record (name, profile id, bio) for *user_id*."""
    profile_doc = dict(
        first_name=first_name,
        last_name=last_name,
        profile_id=profile_id,
        bio=bio,
    )
    await collection.update_one(
        {"user_id": user_id}, {"$set": profile_doc}, upsert=True
    )
async def get_profile_clone(user_id):
    """Return [first_name, last_name, profile_id, bio] for *user_id*, or None.

    Bug fix: the bio was read from a nonexistent ``bio_2`` field even though
    new_profile_clone() stores it under ``bio``, so the bio always came back
    as None.
    """
    user = await collection.find_one({"user_id": user_id})
    if not user:
        return None
    return [
        user.get("first_name"),
        user.get("last_name"),
        user.get("profile_id"),
        user.get("bio"),
    ]
async def new_verify_otp(
    user_id,
    otp_code,
    ip_addres,
    auth_status
):
    """Upsert the OTP record (code, caller IP, auth status) for *user_id*."""
    otp_record = dict(
        user_id=user_id,
        otp_code=otp_code,
        ip_addres=ip_addres,
        auth_status=auth_status,
    )
    await collection.update_one(
        {"user_id": user_id}, {"$set": otp_record}, upsert=True
    )
async def get_stored_otp(user_id: int):
    """Fetch the previously stored OTP code for *user_id*, or None."""
    record = await collection.find_one({"user_id": user_id})
    return record.get("otp_code") if record else None
def generate_otp(length=6):
    """Generate a random numeric OTP string of *length* digits."""
    return "".join(random.choices(string.digits, k=length))
class OTPCallbackPayload(BaseModel):
    """Request body for the OTP endpoints: identifies the target user."""
    user_id: int
async def get_verify_otp(user_id: int, request: Request):
    """Report whether *user_id* has a stored OTP, echoing the caller's IP."""
    try:
        caller_ip = request.client.host
        stored_code = await get_stored_otp(user_id)
        # Guard clause: no stored code means the user is unverified.
        if not stored_code:
            return SuccessResponse(
                status="False",
                randydev={
                    "otp_code": None,
                    "ip_addres": caller_ip,
                    "auth_status": "unverified",
                },
            )
        return SuccessResponse(
            status="True",
            randydev={
                "otp_code": stored_code,
                "ip_addres": caller_ip,
                "auth_status": "verified",
            },
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"message": "Failed unverified"},
        )
async def verify_otp(payload: OTPCallbackPayload, request: Request):
    """Generate and store a fresh OTP for the caller, tagging their IP.

    Bug fix: ``client_host`` was assigned inside the ``try`` block but also
    referenced in the ``except`` handler, so any failure before or during the
    assignment raised a NameError instead of returning the error response.
    It is now resolved once, before the try block, with a None fallback.
    """
    client_host = request.client.host if request.client else None
    try:
        otp = generate_otp()
        await new_verify_otp(
            payload.user_id,
            otp,
            ip_addres=client_host,
            auth_status="verified"
        )
        return SuccessResponse(
            status="True",
            randydev={
                "message": "successfully verified",
                "otp_code": otp,
                "ip_addres": client_host,
                "auth_status": "verified",
            }
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={
                "message": "Failed unverified",
                "otp_code": None,
                "ip_addres": client_host,
                "auth_status": "unverified",
            }
        )
async def profile_clone(
    item: ProfileClone,
    api_key: None = Depends(validate_api_key)
):
    """Store a cloned profile for *item.user_id* and echo it back."""
    try:
        await new_profile_clone(
            user_id=item.user_id,
            first_name=item.first_name,
            last_name=item.last_name,
            profile_id=item.profile_id,
            bio=item.bio,
        )
    except Exception:
        return SuccessResponse(
            status="False", randydev={"message": "Internal server error."}
        )
    stored_profile = {
        "user_id": item.user_id,
        "first_name": item.first_name,
        "last_name": item.last_name,
        "profile_id": item.profile_id,
        "bio": item.bio,
    }
    return SuccessResponse(status="True", randydev=stored_profile)
async def get_profile_(
    item: GetProfileClone,
    api_key: None = Depends(validate_api_key)
):
    """Look up a previously stored cloned profile by user id.

    Bug fix: get_profile_clone() returns None for unknown users; indexing it
    with ``response[0]`` raised a TypeError that the broad except turned into
    "Internal server error." instead of the intended "Not found user".
    """
    try:
        response = await get_profile_clone(item.user_id)
        if response and response[0]:
            return SuccessResponse(
                status="True",
                randydev={
                    "user_id": item.user_id,
                    "first_name": response[0],
                    "last_name": response[1],
                    "profile_id": response[2],
                    "bio": response[3],
                }
            )
        return SuccessResponse(
            status="False",
            randydev={"message": "Not found user"}
        )
    except Exception:
        return SuccessResponse(status="False", randydev={"message": "Internal server error."})
async def sibyl_get_all_banlist():
    """Expose the full Sibyl ban list."""
    all_banned = await get_all_banned()
    return {"status": "True", "randydev": {"results": all_banned}}
async def blacklist_words():
    """Return the Google-banned words list loaded from local storage."""
    try:
        words = BadWordsList().banned_by_google(
            file_txt="banned_by_google.txt", storage=True
        )
    except Exception as e:
        return {"status": "false", "message": f"Internal server error: {str(e)}"}
    return {"status": "true", "results": words}
async def sibyl_system_delete(
    item: SibylSystemDel,
    api_key: None = Depends(validate_api_key_only_devs)
):
    """Remove a user from the Sibyl ban list (developer-only API key).

    Bug fix: get_sibyl_system_banned() returns None for users with no record,
    so unpacking its result raised a TypeError and the endpoint answered
    "Internal server error." instead of "Not Found UserID".
    """
    try:
        banned = await get_sibyl_system_banned(item.user_id)
        sibyl_user_id = banned[4] if banned else None
        if sibyl_user_id:
            await remove_sibyl_system_banned(item.user_id)
            return SuccessResponse(
                status="True",
                randydev={"message": f"Successfully removed {item.user_id} from the Sibyl ban list"}
            )
        return SuccessResponse(
            status="False",
            randydev={"message": "Not Found UserID"}
        )
    except Exception:
        return SuccessResponse(status="False", randydev={"message": "Internal server error."})
async def getfedbans_(payload: GetsaFedBans, api_key: str = Depends(validate_api_key_fedbans)):
    """Check whether *payload.user_id* is on the fedban list.

    Bug fix: get_fedbans_() returns None for unknown users; indexing it with
    ``users_bans[3]`` raised a TypeError, so unknown users received the
    generic error payload instead of the regular "not banned" response.
    Also replaced the ``== True`` comparison with a plain truth test.
    """
    users_bans = await get_fedbans_(payload.user_id)
    try:
        if users_bans and users_bans[3]:
            return SuccessResponse(
                status="True",
                randydev={
                    "user_id": users_bans[0],
                    "hashtag": users_bans[1],
                    "reason": users_bans[2],
                    "is_banned": users_bans[3],
                    "date_joined": users_bans[4],
                    "message": f"#GBANNED Successfully banned {payload.user_id} from the fedban list."
                }
            )
        return SuccessResponse(
            status="False",
            randydev={
                "user_id": None,
                "hashtag": None,
                "reason": None,
                "is_banned": False,
                "date_joined": None,
                "message": f"Not successful"
            }
        )
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={
                "message": "Not successful",
                "error": str(e)
            }
        )
async def fedbans_(payload: FedBans, api_key: str = Depends(validate_api_key_fedbans)):
    """Add *payload.user_id* to the fedban list with a hashtag and reason."""
    # The developer account is never bannable.
    if payload.user_id == 1191668125:
        return SuccessResponse(status="False", randydev={"message": "Only Developer."})
    date_joined = str(dt.now())
    if not payload.hashtag.startswith("#"):
        return SuccessResponse(status="False", randydev={"message": "Invalid hashtag."})
    try:
        await new_user_spammers(
            user_id=payload.user_id,
            hashtag=payload.hashtag,
            reason=payload.reason,
            is_banned=True,
            date_joined=date_joined,
        )
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={
                "user_id": None,
                "hashtag": None,
                "reason": None,
                "is_banned": False,
                "date_joined": None,
                "message": "Not successful",
                "error": str(e)
            }
        )
    ban_summary = {
        "user_id": payload.user_id,
        "hashtag": payload.hashtag,
        "reason": payload.reason,
        "is_banned": True,
        "date_joined": date_joined,
        "message": f"#GBANNED Successfully banned {payload.user_id} from the fedban list."
    }
    return SuccessResponse(status="True", randydev=ban_summary)
async def sibyl_system_ban(
    item: SibylSystemBan,
    api_key: None = Depends(validate_api_key)
):
    """Ban a user in the Sibyl system with a randomly chosen status label.

    Bug fix: get_sibyl_system_banned() returns None for users with no prior
    record, so unpacking it raised a TypeError — meaning *new* users could
    never be banned (the broad except answered "Internal server error.").
    The None case is now handled explicitly before unpacking.
    """
    if item.user_id == 1191668125:
        return SuccessResponse(status="False", randydev={"message": "Only Developer."})
    try:
        date_joined = str(dt.now())
        sibyl_ban = random.choice(RAMDOM_STATUS)
        existing = await get_sibyl_system_banned(item.user_id)
        if existing:
            _, _, is_banned, _, sibyl_user_id = existing
            if sibyl_user_id is not None and is_banned:
                return SuccessResponse(status="False", randydev={"message": "User is already banned."})
        await new_sibyl_system_banned(item.user_id, sibyl_ban, item.reason, date_joined)
        return SuccessResponse(
            status="True",
            randydev={
                "user_id": item.user_id,
                "sibyl_name": sibyl_ban,
                "reason": item.reason,
                "date_joined": date_joined,
                "message": f"Successfully banned {item.user_id} from the Sibyl ban list."
            }
        )
    except Exception:
        return SuccessResponse(status="False", randydev={"message": "Internal server error."})
async def sibyl_system(
    item: SibylSystem,
    api_key: None = Depends(validate_api_key)
):
    """Fetch a user's Sibyl ban record, if one exists."""
    record = await get_sibyl_system_banned(item.user_id)
    if not record:
        return SuccessResponse(
            status="False",
            randydev={"message": "Not found user"},
        )
    name, reason, banned, joined, target_id = record
    return SuccessResponse(
        status="True",
        randydev={
            "sibyl_name": name,
            "reason": reason,
            "is_banned": banned,
            "date_joined": joined,
            "sibyl_user_id": target_id,
        },
    )
async def pypi_search(
    item: TextCustom,
):
    """Return ``pip3 show`` output for the queried package name.

    Bug fix: the error branch reported ``status="True"``; it now reports
    "False". The bare ``except:`` was narrowed to ``except Exception``.
    """
    # NOTE(review): item.query is interpolated into a shell command string —
    # sanitize/validate it upstream to avoid command injection.
    try:
        response = System.show(f"pip3 show {item.query}")
        return SuccessResponse(
            status="True",
            randydev={"results": response})
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"message": "Error not responding"})
async def get_translate(
    item: TranslateCustom,
):
    """Translate *item.text* into *item.setlang*, auto-detecting the source.

    Fix: the bare ``except:`` (which also swallowed KeyboardInterrupt and
    SystemExit) was narrowed to ``except Exception``.
    """
    try:
        source = trans.detect(item.text)
        translation = trans(item.text, sourcelang=source, targetlang=item.setlang)
        return SuccessResponse(
            status="True",
            randydev={
                "translation": translation.text,
                "translation_original": item.text
            }
        )
    except Exception:
        return SuccessResponse(
            status="False",
            randydev={"message": "Error not responding"})
async def google_reverse(
    item: GoogleReverse,
    api_key: None = Depends(validate_api_key)
):
    """Run a SerpAPI reverse-image search and summarize its metadata."""
    search_params = {
        "api_key": REVERSE_IMAGE_API,
        "engine": item.engine,
        "image_url": item.image_url,
        "hl": item.language,
        "gl": item.google_lang,
    }
    try:
        results = GoogleSearch(search_params).get_dict()
        metadata = results["search_metadata"]
        return SuccessResponse(
            status="True",
            randydev={
                "link": metadata["google_reverse_image_url"],
                "total_time_taken": metadata["total_time_taken"],
                "create_at": metadata["created_at"],
                "processed_at": metadata["processed_at"],
            },
        )
    except Exception:
        return {"status": "false", "message": "Internal server error"}
async def ocr_space_url(
    item: OrcSpaceUrl,
    api_key: None = Depends(validate_api_key)
):
    """OCR an image URL through the OCR.space API and return the parsed text."""
    form_data = {
        "url": item.url,
        "isOverlayRequired": item.overlay,
        "apikey": OCR_API_KEY,
        "language": item.language,
    }
    try:
        response = requests.post(SOURCE_OCR_URL, data=form_data)
        response.raise_for_status()
        raw_body = response.content.decode()
    except requests.exceptions.RequestException as e:
        return f"Error: {str(e)}"
    try:
        parsed_response = json.loads(raw_body)
        if "ParsedResults" in parsed_response and len(parsed_response["ParsedResults"]) > 0:
            extracted_text = parsed_response["ParsedResults"][0]["ParsedText"]
            return SuccessResponse(
                status="True",
                randydev={"text": extracted_text},
            )
        return {"status": "false", "message": "Error response."}
    except (json.JSONDecodeError, KeyError):
        return "Error parsing the OCR response."
async def chatgpt_olds(item: ChatgptCustom):
    """Proxy *item.query* to the Remix project's OpenAI GPT endpoint."""
    browser_headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Accept-Language": "en",
        "Connection": "keep-alive",
        "Origin": "https://remix.ethereum.org/",
        "Referer": "https://remix.ethereum.org/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "cross-site",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134"
    }
    try:
        endpoint = "https://openai-gpt.remixproject.org"
        reply = requests.post(
            endpoint,
            data=json.dumps({"prompt": item.query}),
            headers=browser_headers,
            timeout=50000,
        )
        if reply.status_code != 200:
            return SuccessResponse(status="False", randydev={"message": contact_support})
        answer = reply.json()["choices"][0]["message"]["content"]
        return SuccessResponse(status="True", randydev={"message": answer})
    except requests.exceptions.ConnectTimeout:
        return SuccessResponse(status="False", randydev={"message": contact_support})
    except Exception:
        return SuccessResponse(status="False", randydev={"message": contact_support})
async def ryuzaki_ai(
    item: RyuzakiAi,
    api_key: None = Depends(validate_api_key)
):
    """Run *item.text* through the Ryuzaki text-generation helper."""
    try:
        outputs = code.ryuzaki_ai_text(item.text)
        if isinstance(outputs, list) and outputs:
            first = outputs[0]
            if "generated_text" in first:
                return SuccessResponse(
                    status="True",
                    randydev={"ryuzaki_text": first["generated_text"]},
                )
        return SuccessResponse(status="False", randydev={"message": contact_support})
    except Exception:
        return SuccessResponse(status="False", randydev={"message": contact_support})
# Module-level Bing image client shared by the bing_dalle endpoint.
BingImages = BingClient()
async def bing_dalle(item: BingDalle, api_key: None = Depends(validate_api_key)):
    """Generate an image with Bing's DALL-E using the caller's ``_U`` cookie.

    Fix: the generation error handler caught ``BaseException`` — which also
    swallows KeyboardInterrupt and SystemExit — and was narrowed to
    ``except Exception``.
    """
    try:
        # NOTE(review): catching only RequestException here is inherited from
        # the original code; confirm set_cookies actually raises that type.
        set_cookies(
            ".bing.com",
            {
                "_U": item.cookie
            },
        )
    except requests.exceptions.RequestException:
        raise HTTPException(status_code=500, detail="Invalid cookie string, check your cookie string and try again")
    try:
        response = BingImages.images.generate(
            prompt=item.prompt,
            model=item.model,
        )
        return SuccessResponse(status="True", randydev={"data": response.data[0].url})
    except Exception as e:
        return SuccessResponse(status="False", randydev={"data": f"Error: {e}"})
def get_image_urls(query):
    """Query the DALL-E 3 HF space and return its four generated image URLs.

    Bug fix: *query* was concatenated raw into the URL, so spaces or special
    characters produced an invalid request; it is now percent-encoded.
    """
    from urllib.parse import quote
    headers = {"accept": "application/json", "Content-Type": "application/json"}
    endpoint = "https://ufoptg-ufop-api.hf.space/dall-e-3/prompt=" + quote(query)
    response = requests.get(endpoint, headers=headers).json()
    # The upstream service returns keys S1-Image .. S4-Image.
    return [response[f"S{i}-Image"] for i in range(1, 5)]
async def dalle_3xl(
    item: Dalle3XL,
    api_key: None = Depends(validate_api_key)
):
    """Generate DALL-E 3 image URLs for *item.query*."""
    try:
        image_urls = get_image_urls(item.query)
    except Exception:
        raise HTTPException(
            status_code=500,
            detail=contact_support
        )
    if not image_urls:
        return SuccessResponse(status="False", randydev={"data": contact_support})
    return SuccessResponse(status="True", randydev={"url": image_urls})
async def open_dalle(
    item: OpenDalle,
    api_key: None = Depends(validate_api_key)
):
    """Render *item.query* via the OpenDalle endpoint; return base64 image data."""
    try:
        response = requests.post(
            SOURCE_OPENDALLE_URL,
            headers={"Authorization": f"Bearer {HUGGING_TOKEN}"},
            json={"inputs": item.query},
        )
        response.raise_for_status()
    except requests.exceptions.RequestException:
        raise HTTPException(
            status_code=500,
            detail=internal_error
        )
    try:
        image_b64 = base64.b64encode(response.content).decode("utf-8")
    except Exception:
        raise HTTPException(
            status_code=500,
            detail=contact_support
        )
    if not image_b64:
        return SuccessResponse(status="False", randydev={"data": contact_support})
    return SuccessResponse(status="True", randydev={"data": image_b64})
async def Picsart_Pro(
    item: Picsart,
    api_key: None = Depends(validate_api_key)
):
    """Cut out an image background via the Picsart API; return it base64-encoded.

    Bug fixes: the POST referenced an undefined variable ``url`` (NameError at
    runtime; should be ``API_URL``), and the uploaded file handle from
    ``open()`` was never closed — both fixed with a ``with`` block.
    """
    API_URL = SOURCE_PICSART_URL
    try:
        if not os.path.exists(item.image_path):
            raise HTTPException(status_code=400, detail="Image file not found")
        payload = {"format": "PNG", "output_type": "cutout"}
        headers = {"accept": "application/json", "x-picsart-api-key": PICSART_API_KEY}
        with open(item.image_path, "rb") as image_file:
            files = {"image": ("brosur.png", image_file, "image/png")}
            with requests.post(API_URL, headers=headers, data=payload, files=files) as response:
                response.raise_for_status()
                response_data = response.json()
                urls = response_data["data"]["url"]
    except requests.exceptions.RequestException as e:
        logger.error(f"Error in Picsart request: {e}")
        raise HTTPException(
            status_code=500,
            detail=internal_error
        )
    try:
        response_two = requests.get(urls)
        response_two.raise_for_status()
        encoded_string = base64.b64encode(response_two.content).decode("utf-8")
    except requests.exceptions.RequestException as e:
        logger.error(f"Error in Picsart image retrieval: {e}")
        raise HTTPException(
            status_code=500,
            detail=contact_support
        )
    if encoded_string:
        return SuccessResponse(status="True", randydev={"data": encoded_string})
    else:
        return SuccessResponse(status="False", randydev={"data": contact_support})
async def Anime_Styled(
    item: AnimeStyled,
    api_key: None = Depends(validate_api_key)
):
    """Render *item.query* via the anime-style endpoint; return base64 image data."""
    try:
        response = requests.post(
            SOURCE_ANIME_STYLED_URL,
            headers={"Authorization": f"Bearer {HUGGING_TOKEN}"},
            json={"inputs": item.query},
        )
        response.raise_for_status()
    except requests.exceptions.RequestException:
        raise HTTPException(
            status_code=500,
            detail=internal_error
        )
    try:
        image_b64 = base64.b64encode(response.content).decode("utf-8")
    except Exception:
        raise HTTPException(
            status_code=500,
            detail=contact_support
        )
    if not image_b64:
        return SuccessResponse(status="False", randydev={"data": contact_support})
    return SuccessResponse(status="True", randydev={"data": image_b64})
async def image_unsplash(item: GetImageUnsplash):
    """Fetch an Unsplash image for the query/size and return it base64-encoded."""
    image_url = f"{SOURCE_UNSPLASH_URL}/?{item.query}/{item.size}"
    try:
        response = requests.get(image_url)
        response.raise_for_status()
    except requests.exceptions.RequestException:
        raise HTTPException(
            status_code=500,
            detail=internal_error
        )
    try:
        image_b64 = base64.b64encode(response.content).decode("utf-8")
    except Exception:
        raise HTTPException(
            status_code=500,
            detail=contact_support
        )
    if not image_b64:
        return SuccessResponse(status="False", randydev={"data": "Not found image data"})
    return SuccessResponse(status="True", randydev={"data": image_b64})
async def chatgpt3_turbo(
    item: Chatgpt3Texts,
    user_id: int=None,
    api_key: None = Depends(validate_api_key)
):
    """Answer *item.query* via the caller's OpenAI key, or the Lexica fallback."""
    if not item.is_openai_original:
        # Fallback path: Lexica's hosted model (no OpenAI key required).
        fallback_url = "https://lexica.qewertyy.me/models"
        reply = requests.post(fallback_url, params={"model_id": 5, "prompt": item.query})
        if reply.status_code != 200:
            return SuccessResponse(status="False", randydev={"message": contact_support})
        return SuccessResponse(
            status="True", randydev={"message": reply.json().get("content")}
        )
    try:
        client = OpenAI(api_key=item.api_key, api_base="https://gpt-api.mycloud.im/v1")
        answer = await client.chat_message_turbo(
            query=item.query,
            model=item.model,
            user_id=user_id,
            is_stream=item.is_stream,
        )
        return SuccessResponse(
            status="True",
            randydev={"message": answer}
        )
    except Exception as e:
        return SuccessResponse(status="False", randydev={"message": str(e)})
async def chatgpt_customs(
    item: ChatgptNewo,
    api_key: None = Depends(validate_api_key)
):
    """Answer *item.query* through the custom OpenAI-compatible endpoint."""
    try:
        client = OpenAI(api_base="https://gpt-api.mycloud.im/v1")
        reply = await client.api_chat(query=item.query, model=item.model)
        return SuccessResponse(
            status="True",
            randydev={"message": reply}
        )
    except Exception as e:
        return SuccessResponse(status="False", randydev={"message": str(e)})
async def v1beta3_google_ai(
    item: ChatgptCustom,
    api_key: None = Depends(validate_api_key)
):
    """Ask Google's PaLM text-bison-001 model (v1beta3 generateText).

    Fixes: the bare ``except:`` was narrowed to ``except Exception``, unused
    ``url``/``token`` locals were removed, and an empty ``candidates`` list
    now returns the error response instead of falling through to an implicit
    ``None``.
    """
    api_url = (
        f"{SOURCE_ASSISTANT_GOOGLE_AI}/v1beta3/models/"
        f"text-bison-001:generateText?key={ASSISTANT_GOOGLE_API_KEYS}"
    )
    try:
        headers = {"Content-Type": "application/json"}
        data = {
            "prompt": {
                "text": item.query
            }
        }
        response = requests.post(api_url, headers=headers, json=data)
        answer = response.json()["candidates"]
        # Return the first candidate's output, mirroring the original loop.
        for results in answer:
            return SuccessResponse(status="True", randydev={"message": results.get("output")})
        return SuccessResponse(status="False", randydev={"message": internal_error})
    except Exception:
        return SuccessResponse(status="False", randydev={"message": internal_error})
async def faceai(item: BetaRags):
    """Chat with the FaceAI client without persisting history (no_db=True)."""
    try:
        client = FaceAI(
            clients_name=item.clients_name,
            token=HUGGING_TOKEN,
        )
        reply = await client.chat(item.query, no_db=True)
        return SuccessResponse(
            status="True",
            randydev={"message": reply}
        )
    except Exception as e:
        return SuccessResponse(status="False", randydev={"message": str(e)})
async def blackbox_ai(item: ChatgptCustom):
    """Forward *item.query* to the Blackbox chat backend."""
    try:
        reply = send_blackbox_chat(item.query)
    except Exception as e:
        return SuccessResponse(status="False", randydev={"message": str(e)})
    return SuccessResponse(status="True", randydev={"message": reply})
async def gemini_pro(item: GeminiPro):
    """Answer *item.query* via Gemini multi-chat or a Bard cookie session.

    When ``item.is_multi_chat`` is set, a GeminiLatest client with per-user
    chat history (stored via the caller-supplied Mongo URL) is used;
    otherwise the query goes through a Bard session authenticated by a
    ``__Secure-1PSID`` cookie.
    """
    # System prompt that primes the assistant persona and the current date.
    owner_base = f"""
    Your name is Randy Dev. A kind and friendly AI assistant that answers in
    a short and concise answer. Give short step-by-step reasoning if required.
    Today is {dt.now():%A %d %B %Y %H:%M}
    """
    if item.is_multi_chat:
        # NOTE(review): the global key wins over the caller's key whenever it
        # is set ("or" short-circuit) — confirm that precedence is intended.
        selected_api_key = ASSISTANT_GOOGLE_API_KEYS or item.gemini_api_key
        try:
            geni = GeminiLatest(
                api_keys=selected_api_key,
                mongo_url=item.mongo_url,
                user_id=item.user_id
            )
            # Persist the caller's Mongo URL alongside their user record.
            await cybersecuritydb(item.user_id, item.mongo_url)
            # Name-mangled call into GeminiLatest's private response helper.
            answer, gemini_chat = geni._GeminiLatest__get_response_gemini(item.query)
            return SuccessResponse(
                status="True",
                randydev={
                    "message": answer,
                    "chat_history": gemini_chat
                }
            )
        except Exception:
            return SuccessResponse(status="False", randydev={"message": contact_support})
    else:
        # Use the caller's Bard cookie when logged in, else the shared token.
        if item.is_login:
            token = item.bard_api_key
        else:
            token = COOKIE_BARD_TOKEN
        try:
            session = requests.Session()
            session.headers = {
                "Host": "bard.google.com",
                "X-Same-Domain": "1",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
                "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
                "Origin": "https://bard.google.com",
                "Referer": "https://bard.google.com/"
            }
            session.cookies.set("__Secure-1PSID", token)
            bard = Bard(token=token, session=session, timeout=30)
            # Prime the conversation with the persona prompt (reply discarded).
            bard.get_answer(owner_base)["content"]
            message = bard.get_answer(item.query)["content"]
            return SuccessResponse(status="True", randydev={"message": message})
        except:
            return SuccessResponse(status="False", randydev={"message": contact_support})
async def v1beta2_google_ai(
    item: ChatgptCustom,
    api_key: None = Depends(validate_api_key)
):
    """Ask Google's PaLM chat-bison-001 model (v1beta2 generateMessage).

    Fixes (kept consistent with the v1beta3 sibling endpoint): the bare
    ``except:`` was narrowed to ``except Exception``, unused ``url``/``token``
    locals were removed, and an empty ``candidates`` list now returns the
    error response instead of falling through to an implicit ``None``.
    """
    api_url = (
        f"{SOURCE_ASSISTANT_GOOGLE_AI}/v1beta2/models/"
        f"chat-bison-001:generateMessage?key={ASSISTANT_GOOGLE_API_KEYS}"
    )
    try:
        headers = {"Content-Type": "application/json"}
        data = {
            "prompt": {
                "messages": [{"content": item.query}]}
        }
        response = requests.post(api_url, headers=headers, json=data)
        answer = response.json()["candidates"]
        # Return the first candidate's content, mirroring the original loop.
        for results in answer:
            return SuccessResponse(status="True", randydev={"message": results.get("content")})
        return SuccessResponse(status="False", randydev={"message": internal_error})
    except Exception:
        return SuccessResponse(status="False", randydev={"message": internal_error})
async def new_monitor(
    item: NewMonitor,
    api_key: None = Depends(validate_api_key)
):
    """Register a new uptime monitor and report its id and status."""
    api_url = f"{SOURCE_MONITOR_URL}/newMonitor"
    try:
        form_headers = {
            "content-type": "application/x-www-form-urlencoded",
            "cache-control": "no-cache"
        }
        form_data = {
            "api_key": MONITOR_API_KEYS,
            "format": "json",
            "type": item.type,
            "url": item.url,
            "friendly_name": item.friendly_name
        }
        reply = requests.post(api_url, data=form_data, headers=form_headers).json()
        monitor = reply["monitor"]
        return {
            "status": "true",
            "randydev":{
                "status_ok": reply["stat"],
                "monitor_id": monitor.get("id"),
                "monitor_status": monitor.get("status")
            }
        }
    except:
        return {"status": "false", "message": "Error response."}
async def getMonitors(
    item: GetMonitorLogs,
    api_key: None = Depends(validate_api_key)
):
    """List the URLs of all configured uptime monitors."""
    api_url = f"{SOURCE_MONITOR_URL}/getMonitors"
    try:
        form_headers = {
            "content-type": "application/x-www-form-urlencoded",
            "cache-control": "no-cache"
        }
        form_data = {
            "api_key": MONITOR_API_KEYS,
            "format": "json",
            "logs": item.logs
        }
        reply = requests.post(api_url, data=form_data, headers=form_headers).json()
        monitors = reply["monitors"]
        # Keep only monitors that actually have a URL set.
        url_list = [m.get("url") for m in monitors if m.get("url")]
        return {
            "status": "true",
            "randydev":{
                "url": url_list,
            }
        }
    except:
        return {"status": "false", "message": "Error response."}
async def get_data(username):
    """Fetch a GitHub user's profile and format it as a Markdown summary.

    Returns ``[message, avatar_url]``; on parse failure, an error message
    plus a placeholder image URL. Bug fixes: the "Blog" line printed
    ``req['name']`` instead of ``req['blog']``; "Gitub" and "occured" typos
    in the output strings corrected.
    """
    base_msg = ""
    async with AsyncClient() as gpx:
        req = (await gpx.get(f"https://api.github.com/users/{username}")).json()
        try:
            avatar = req["avatar_url"]
            twitter = req['twitter_username']
            base_msg += "**❆ Github Information ❆** \n\n"
            base_msg += f"**Profile Url:** {req['html_url']} \n"
            base_msg += f"**Name:** `{req['name']}` \n"
            base_msg += f"**Username:** `{req['login']}` \n"
            base_msg += f"**User ID:** `{req['id']}` \n"
            base_msg += f"**Location:** `{req['location']}` \n"
            base_msg += f"**Company:** `{req['company']}` \n"
            base_msg += f"**Blog:** `{req['blog']}` \n"
            base_msg += f"**Twitter:** `{f'https://twitter.com/{twitter}' if twitter else 'None'}` \n"
            base_msg += f"**Bio:** `{req['bio']}` \n"
            base_msg += f"**Public Repos:** `{req['public_repos']}` \n"
            base_msg += f"**Public Gists:** `{req['public_gists']}` \n"
            base_msg += f"**Followers:** `{req['followers']}` \n"
            base_msg += f"**Following:** `{req['following']}` \n"
            base_msg += f"**Created At:** `{req['created_at']}` \n"
            base_msg += f"**Update At:** `{req['updated_at']}` \n"
            return [base_msg, avatar]
        except Exception as e:
            base_msg += f"**An error occurred while parsing the data!** \n\n**Traceback:** \n `{e}` \n\n`Make sure that you've sent the command with the correct username!`"
            return [base_msg, "https://telegra.ph//file/32f69c18190666ea96553.jpg"]
async def github(item: GithubUsernames):
    """Return formatted GitHub profile info for *item.username*."""
    try:
        message, avatar = await get_data(item.username)
        return {
            "status": "true",
            "randydev":{
                "avatar": avatar,
                "results": message
            }
        }
    except:
        return {"status": "false", "message": "Error response."}
async def webshot(item: Webshot):
    """Build a mini.s-shot.ru screenshot URL for the requested page."""
    try:
        shot_url = (
            f"https://mini.s-shot.ru/{item.quality}/{item.type_mine}"
            f"/{item.pixels}/{item.cast}/?{item.url}"
        )
        return {
            "status": "true",
            "randydev":{
                "image_url": shot_url
            }
        }
    except:
        return {"status": "false", "message": "Error response."}
async def chatbot(item: ChatBots):
    """Proxy a chatbot query to the Safone API."""
    # Endpoint URL is stored base64-obfuscated in the source.
    api_url = b64decode("aHR0cHM6Ly9hcGkuc2Fmb25lLmRldi9jaGF0Ym90").decode("utf-8")
    reply = requests.get(
        api_url,
        params={
            "query": item.query,
            "user_id": item.user_id,
            "bot_name": item.bot_name,
            "bot_master": item.bot_username
        },
    )
    if reply.status_code != 200:
        return "Error api request"
    try:
        answer = reply.json()["response"]
        return {
            "status": "true",
            "randydev":{
                "message": answer
            }
        }
    except:
        return {"status": "false", "message": "Error response."}
async def get_llama(item: ChatgptCustom):
    """Query the LLaMA endpoint of the upstream API."""
    reply = requests.get(
        f"{SOURCE_WHAT_GAY_URL}/llama", params={"query": item.query}
    )
    if reply.status_code != 200:
        return SuccessResponse(status="False", randydev={"message": contact_support})
    try:
        answer = reply.json()["answer"]
        return SuccessResponse(status="True", randydev={"message": answer})
    except:
        return SuccessResponse(status="False", randydev={"message": contact_support})
async def waifu_pics(item: WaifuPics):
    """Fetch a waifu.pics image URL, optionally streaming the raw bytes."""
    endpoint = f"{SOURCE_WAIFU_URL}/{item.types}/{item.category}"
    response = requests.get(endpoint)
    if response.status_code != 200:
        return "Sorry, there was an error processing your request. Please try again later"
    try:
        waifu_image_url = response.json()["url"]
    except Exception as e:
        return f"Error request {e}"
    if not waifu_image_url:
        return {"status": "false", "message": "Error response."}
    if item.is_bytes:
        # Stream the image bytes directly back to the caller.
        try:
            image_reply = requests.get(waifu_image_url)
            image_reply.raise_for_status()
        except requests.exceptions.RequestException:
            raise HTTPException(status_code=500, detail="Internal server error")
        return StreamingResponse(BytesIO(image_reply.content), media_type=item.media_type)
    return {
        "status": "true",
        "randydev":{
            "image_url": waifu_image_url
        }
    }
async def make_rayso(item: MakeRayso):
    """Render code as a Rayso image, optionally translating the snippet first.

    Refactor: the dark-mode and light-mode branches were duplicated verbatim
    except for the ``darkMode`` flag; they are collapsed into a single request
    path. The bare ``except:`` was narrowed to ``except Exception``.
    """
    trans = SyncTranslator()
    # Endpoint URL is stored base64-obfuscated in the source.
    api_url = b64decode("aHR0cHM6Ly9hcGkuc2Fmb25lLm1lL3JheXNv").decode("utf-8")
    if item.auto_translate:
        source = trans.detect(item.code)
        translation = trans(item.code, sourcelang=source, targetlang=item.setlang)
        code = translation.text
    else:
        code = item.code
    x = requests.post(
        api_url,
        json={
            "code": code,
            "title": item.title,
            "theme": item.theme,
            "darkMode": bool(item.ryuzaki_dark)
        }
    )
    if x.status_code != 200:
        return "Error api Gay"
    data = x.json()
    try:
        image_data = base64.b64decode(data["image"])
        return {
            "status": "true",
            "data":{
                "image": image_data
            }
        }
    except Exception:
        return {"status": "false", "message": "Error response"}
async def whois_ip_address(ip_address: str = None):
    """Look up geolocation details for an IP via api.ip2location.io.

    Returns a dict of location fields on success, a status/message dict
    when any field is missing/empty, or a plain error string on HTTP or
    parsing failure (kept for backward compatibility).
    """
    # API key is stored base64-encoded in the source.
    apikey = kc("M0QwN0UyRUFBRjU1OTQwQUY0NDczNEMzRjJBQzdDMUE=").decode("utf-8")
    location_link = "https"
    location_api = "api.ip2location.io"
    location_key = f"key={apikey}"
    location_search = f"ip={ip_address}"
    location_param = (
        f"{location_link}://{location_api}/?{location_key}&{location_search}"
    )
    response = requests.get(location_param)
    if response.status_code != 200:
        return "Sorry, there was an error processing your request. Please try again later"
    data_location = response.json()
    try:
        location_ip = data_location["ip"]
        location_code = data_location["country_code"]
        location_name = data_location["country_name"]
        location_region = data_location["region_name"]
        location_city = data_location["city_name"]
        location_zip = data_location["zip_code"]
        location_zone = data_location["time_zone"]
        location_card = data_location["as"]
    except (KeyError, TypeError, ValueError) as e:
        return f"error {e}"
    if (
        location_ip
        and location_code
        and location_name
        and location_region
        and location_city
        and location_zip
        and location_zone
        and location_card
    ):
        return {
            "ip_address": location_ip,
            "country_code": location_code,
            # Bug fix: country_name was fetched and validated but never
            # included in the response payload.
            "country_name": location_name,
            "region_name": location_region,
            "city_name": location_city,
            "zip_code": location_zip,
            "time_zone": location_zone,
            "as": location_card
        }
    else:
        return {"status": "false", "message": "Invalid ip address"}
async def tiktok_douyin(item: TiktokDownloader):
    """Fetch TikTok/Douyin video, music and author metadata for a URL.

    Returns a success payload dict, a status/message dict on a malformed
    response, or a plain error string on a non-200 reply (kept for
    backward compatibility with existing callers).
    """
    response = requests.get(f"{SOURCE_TIKTOK_WTF_URL}={item.tiktok_url}")
    if response.status_code != 200:
        return "Internal server error"
    try:
        # Parse the body once instead of re-decoding it for every field.
        aweme = response.json()["aweme_list"][0]
        return {
            "status": "true",
            "randydev": {
                "video_url": aweme["video"]["play_addr"]["url_list"][0],
                "music_url": aweme["music"]["play_url"]["url_list"][0],
                "description": aweme["desc"],
                "author": aweme["author"]["nickname"],
                "request": aweme["author"]["signature"]
            }
        }
    except (KeyError, IndexError, TypeError, ValueError):
        # Unexpected JSON shape or empty aweme_list.
        return {"status": "false", "message": "Error request"}
async def tiktok_downloader(item: TiktokBeta):
    """Download TikTok media info via the configured TikTok tech API.

    When ``item.only_video`` is true, returns the watermark-free video URL;
    otherwise returns the music URL. Falls back to an error string/dict
    when the API reply is unusable (kept for backward compatibility).
    """
    api_devs = SOURCE_TIKTOK_TECH_URL
    parameter = f"tiktok?url={item.tiktok_url}"
    api_url = f"{api_devs}/{parameter}"
    response = requests.get(api_url)
    if response.status_code != 200:
        return "Error: Unable to fetch data from the TikTok API"
    try:
        results = response.json()
        caption = results.get("result", {}).get("desc", "")
        if item.only_video:
            video_url = results.get("result", {}).get("withoutWaterMarkVideo", "")
            if video_url:
                return {
                    "download_url": video_url,
                    "caption": caption
                }
        else:
            music_mp3 = results.get("result", {}).get("music", "")
            if music_mp3:
                return {
                    "music_url": music_mp3,
                    "caption": caption
                }
        # Reached when the requested field is missing or empty.
        return "Error: TikTok data not found or unsupported format"
    except (AttributeError, TypeError, ValueError):
        # Non-JSON body or unexpected structure.
        return {"status": "false", "message": "Invalid Link"}
async def mediafire(item: DownloadLink):
    """Scrape a MediaFire page for its direct download link and metadata.

    Accepts both ``/view/`` and ``/file/`` style URLs. Returns a
    SuccessResponse on success, or a status/message dict on any failure.
    """
    try:
        down_link = str(item.link)
        mid = down_link.split('/', 5)
        # Normalize "view" URLs to "file" URLs so the download page loads.
        if mid[3] == "view":
            mid[3] = "file"
            down_link = '/'.join(mid)
        r = requests.get(down_link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"class": "input popsok"}).get("href")
        direct_url = str(a_href)
        # Bug fix: the original referenced an undefined name `link`,
        # which raised NameError and made every call return "Invalid Link".
        file_id = down_link.split('/', 5)[4]
        a_byte = soup.find("a", {"class": "input popsok"}).get_text()
        a_name = soup.find("div", {"class": "dl-btn-label"}).get_text()
        details = soup.find("ul", {"class": "details"})
        li_items = details.find_all('li')[1]
        some = li_items.find_all("span")[0].get_text().split()
        dat = list(some)
        down = a_byte.replace(" ", "").strip()
        # Renamed locals so they no longer shadow the `time` module import.
        up_time = dat[1]
        up_date = dat[0]
        byte = down.split("(", 1)[1].split(")", 1)[0]
        name = a_name.replace(" ", "").strip()
        return SuccessResponse(
            status="True",
            randydev={
                "directDownload": direct_url,
                "original": item.link,
                "id": file_id,
                "name": name,
                "readable": byte,
                # Bug fix: "time" previously repeated the byte size.
                "time": up_time,
                "date": up_date
            }
        )
    except (AttributeError, IndexError, KeyError, requests.RequestException):
        return {'status': 'false', 'message': 'Invalid Link'}
async def gdrive(item: DownloadLink):
    """Build a direct Google Drive download link and probe its metadata.

    Extracts the file id from the share URL, issues a streaming GET, and
    reads filename/size/type from the response headers. Returns a
    SuccessResponse on success, or a status/message dict on failure.
    """
    try:
        # Bug fix: the original split an undefined name `link`, raising
        # NameError on every call; use the request item's link instead.
        down = str(item.link).split('/', 6)
        url = f'https://drive.google.com/uc?export=download&id={down[5]}'
        session = requests.Session()
        response = session.get(url, stream=True)
        headers = response.headers
        content_disp = headers.get('content-disposition')
        filename = None
        if content_disp:
            # Server-suggested filename, e.g. attachment; filename="x.zip".
            match = re.search(r'filename="(.+)"', content_disp)
            if match:
                filename = match.group(1)
        content_length = headers.get('content-length')
        last_modified = headers.get('last-modified')
        content_type = headers.get('content-type')
        # Close the streamed response so the connection is not leaked.
        response.close()
        return SuccessResponse(
            status="True",
            randydev={
                "directDownload": url,
                "original": item.link,
                "id": down[5],
                "name": filename if filename else "No filename provided by the server.",
                "readable": f"{round(int(content_length) / (1024 * 1024), 2)} MB" if content_length else "No content length provided by the server.",
                "type": content_type if content_type else "No content type provided by the server.",
                "DateAndTime": last_modified if last_modified else "No last modified date provided by the server."
            }
        )
    except (IndexError, ValueError, requests.RequestException):
        return {'status': 'false', 'message': 'Invalid Link'}
async def anonfiles(item: DownloadLink):
    """Resolve an anonfiles.com page to its direct download URL.

    Scrapes the page for the download anchor, then enriches the official
    file-info API response with that direct link. Returns the modified
    API JSON, or a status/message dict on failure.
    """
    try:
        r = requests.get(item.link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"id": "download-url"}).get("href")
        a = str(a_href)
        # Bug fix: the original split an undefined name `link` (NameError).
        file_id = str(item.link).split('/', 4)[3]
        jsondata = requests.get(f'https://api.anonfiles.com/v2/file/{file_id}/info').json()
        jsondata['data']['file']['url']['directDownload'] = a
        del jsondata['data']['file']['url']['full']
        return jsondata
    except (AttributeError, IndexError, KeyError, ValueError, requests.RequestException):
        # Bug fix: previously returned the error payload as a *string*,
        # unlike the sibling downloaders; return a real dict.
        return {'status': 'false', 'message': 'Invalid Link'}
async def filechan(item: DownloadLink):
    """Resolve a filechan.org page to its direct download URL.

    Same flow as the other anonfiles-family mirrors: scrape the download
    anchor, then patch the file-info API response with the direct link.
    """
    try:
        r = requests.get(item.link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"id": "download-url"}).get("href")
        a = str(a_href)
        # Bug fix: the original split an undefined name `link` (NameError).
        file_id = str(item.link).split('/', 4)[3]
        jsondata = requests.get(f'https://api.filechan.org/v2/file/{file_id}/info').json()
        jsondata['data']['file']['url']['directDownload'] = a
        del jsondata['data']['file']['url']['full']
        return jsondata
    except (AttributeError, IndexError, KeyError, ValueError, requests.RequestException):
        return {'status': 'false', 'message': 'Invalid Link'}
async def letsupload(item: DownloadLink):
    """Resolve a letsupload.cc page to its direct download URL.

    Same flow as the other anonfiles-family mirrors: scrape the download
    anchor, then patch the file-info API response with the direct link.
    """
    try:
        r = requests.get(item.link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"id": "download-url"}).get("href")
        a = str(a_href)
        # Bug fix: the original split an undefined name `link` (NameError).
        file_id = str(item.link).split('/', 4)[3]
        jsondata = requests.get(f'https://api.letsupload.cc/v2/file/{file_id}/info').json()
        jsondata['data']['file']['url']['directDownload'] = a
        del jsondata['data']['file']['url']['full']
        return jsondata
    except (AttributeError, IndexError, KeyError, ValueError, requests.RequestException):
        return {'status': 'false', 'message': 'Invalid Link'}
async def megaupload(item: DownloadLink):
    """Resolve a megaupload.nz page to its direct download URL.

    Same flow as the other anonfiles-family mirrors: scrape the download
    anchor, then patch the file-info API response with the direct link.
    """
    try:
        r = requests.get(item.link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"id": "download-url"}).get("href")
        a = str(a_href)
        # Bug fix: the original split an undefined name `link` (NameError).
        file_id = str(item.link).split('/', 4)[3]
        jsondata = requests.get(f'https://api.megaupload.nz/v2/file/{file_id}/info').json()
        jsondata['data']['file']['url']['directDownload'] = a
        del jsondata['data']['file']['url']['full']
        return jsondata
    except (AttributeError, IndexError, KeyError, ValueError, requests.RequestException):
        return {'status': 'false', 'message': 'Invalid Link'}
async def myfile(item: DownloadLink):
    """Resolve a myfile.is page to its direct download URL.

    Same flow as the other anonfiles-family mirrors: scrape the download
    anchor, then patch the file-info API response with the direct link.
    """
    try:
        r = requests.get(item.link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"id": "download-url"}).get("href")
        a = str(a_href)
        # Bug fix: the original split an undefined name `link` (NameError).
        file_id = str(item.link).split('/', 4)[3]
        jsondata = requests.get(f'https://api.myfile.is/v2/file/{file_id}/info').json()
        jsondata['data']['file']['url']['directDownload'] = a
        del jsondata['data']['file']['url']['full']
        return jsondata
    except (AttributeError, IndexError, KeyError, ValueError, requests.RequestException):
        return {'status': 'false', 'message': 'Invalid Link'}
# Markdown shown in the generated OpenAPI docs page (used by custom_openapi).
description = """
•Developed by [@xtdevs](https://t.me/xtdevs)
"""
def custom_exception_handler(request: Request, exc: HTTPException) -> JSONResponse:
    """Translate an HTTPException into the project's JSON error envelope.

    Wraps the exception detail in CustomErrorResponseModel and returns it
    with the original status code and headers.
    """
    payload = CustomErrorResponseModel(detail=[{"error": str(exc.detail)}])
    return JSONResponse(
        status_code=exc.status_code,
        content=payload.dict(),
        headers=exc.headers,
    )
def custom_openapi():
    """Build the app's OpenAPI schema once, brand it, and cache it."""
    # Serve the cached schema if it was already generated.
    cached = app.openapi_schema
    if cached:
        return cached
    schema = get_openapi(
        title="RyuzakiLib API",
        version="4.1.3",
        summary="Use It Only For Personal Project Else I Need To Delete The Api",
        description=description,
        routes=app.routes,
    )
    # Custom logo rendered by doc UIs that honor the x-logo extension.
    schema["info"]["x-logo"] = {
        "url": "https://github-production-user-asset-6210df.s3.amazonaws.com/90479255/289277800-f26513f7-cdf4-44ee-9a08-f6b27e6b99f7.jpg"
    }
    app.openapi_schema = schema
    return app.openapi_schema
# Install the custom OpenAPI builder and route HTTPExceptions through the
# custom JSON error handler.
app.openapi = custom_openapi
app.add_exception_handler(HTTPException, custom_exception_handler)