import libtorrent as lt
import streamlit as st
import os
import re
import time
import requests
from bs4 import BeautifulSoup
import json
import math
import subprocess
import shutil
from collections import defaultdict
from tqdm import tqdm
import urllib.parse
def sesi(info, lokasi_file):
    st.session_state.info = info
    st.session_state.lokasi_file = lokasi_file
    if 'info' in st.session_state:
        num_lines = st.session_state.info.count('\n') + 1
        text_area_height = 25 * num_lines
        st.text_area("Hasil Redirect", st.session_state.info, height=text_area_height)
        with open(st.session_state.lokasi_file, 'rb') as f:
            file_contents = f.read()
        st.download_button(
            label="Download File TXT",
            data=file_contents,
            file_name=st.session_state.lokasi_file.replace('/home/user/app/', '').title().replace('Txt', 'txt'),
            mime='text/plain'
        )
    return st.session_state.info, st.session_state.lokasi_file
def simpan_txt(nama_file, teks):
    with open(nama_file + '.txt', 'w') as f:
        f.write(teks)
    lokasi_file = os.path.abspath(nama_file + '.txt')
    return lokasi_file
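
# Usage sketch (illustrative only, not part of the app flow; the file name is
# hypothetical): simpan_txt writes the text to "<nama_file>.txt" and returns its
# absolute path, which sesi() then renders together with a download button.
#   lokasi = simpan_txt('hasil_redirect', info_text)
#   sesi(info_text, lokasi)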
def link_redirect(url):
    # Dictionary used to translate HTTP status codes into readable labels
    status_codes = {
        200: 'OK',
        301: 'Dipindahkan Secara Permanen',
        302: 'Ditemukan',
        303: 'Lihat Lainnya',
        304: 'Tidak Dimodifikasi',
        307: 'Pengalihan Sementara',
        400: 'Permintaan Buruk',
        401: 'Tidak Sah',
        403: 'Dilarang',
        404: 'Tidak Ditemukan',
        500: 'Kesalahan Server Internal',
        502: 'Gerbang Buruk',
        503: 'Layanan Tidak Tersedia'
    }
    info = ''
    response = requests.get(url)
    if response.history:
        info += "Redirects: \n"
        for resp in response.history:
            status = status_codes.get(resp.status_code, 'Kode Status Tidak Diketahui')
            info += f"[{resp.status_code}] : {status} \n{resp.url}\n\n"
        info += "Final destination: \n"
        status = status_codes.get(response.status_code, 'Kode Status Tidak Diketahui')
        info += f"[{response.status_code}] : {status} \n{response.url}\n"
        return info, response.url
    else:
        info += "No Redirects: \n"
        return info, url
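
# Usage sketch (the short link below is made up, for illustration only):
# link_redirect follows the redirect chain via requests.get and returns a
# readable report plus the final URL.
#   info, final_url = link_redirect('https://bit.ly/example')
#   # info looks like "Redirects: \n[301] : Dipindahkan Secara Permanen ..." and
#   # ends with the final destination, or "No Redirects: \n" if there was none.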
def get_digits(thumbnail_url):
    if 'cospuri' in thumbnail_url:
        match = re.search(r'/0(\d{3})', thumbnail_url)
    elif 'legsjapan' in thumbnail_url:
        match = re.search(r'/(\d{4})', thumbnail_url)
    elif 'fellatiojapan' in thumbnail_url or 'spermmania' in thumbnail_url:
        match = re.search(r'/(\d+)_', thumbnail_url)
    else:
        # No known site pattern: avoid an unbound `match` below
        match = None
    if match:
        digits = match.group(1)
        print(f"Kode Digit: {digits}")
        return digits
    else:
        return ''
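
# Usage sketch (the thumbnail URL is made up): get_digits extracts the numeric
# scene code from a thumbnail path with a per-site regex, e.g. the '/0NNN' form
# used for cospuri URLs.
#   get_digits('https://cospuri.example/preview/0123/cover.jpg')  # -> '123'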
def get_video_info(url):
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Look up the video title in the meta element with name="twitter:title"
    title = soup.find("meta", attrs={"name": "twitter:title"})
    if title:
        video_title = title['content']
        series = ''
        if "Cospuri" in video_title:
            series = "Cospuri"
        elif "Sperm Mania" in video_title:
            series = "Sperm Mania"
        elif "Legs Japan" in video_title:
            series = "Legs Japan"
        elif "Fellatio Japan" in video_title:
            series = "Fellatio Japan"
        # Find the "div" element with class "pornstarsList"
        stars_div = soup.find('div', class_='pornstarsList')
        # Find all "a" elements inside that "div" (empty list if the div is missing)
        actor_elements = stars_div.find_all('a', class_='label') if stars_div else []
        # Take the names from the "a" elements
        actor_names = [actor.text for actor in actor_elements]
        # Join the names with " & " between them
        actress = " & ".join(actor_names)
        print(f"Series: {series}")
        print(f"Artis: {actress}")
        return actress, series
    else:
        print("Tidak ditemukan elemen meta dengan name twitter:title")
        return '', ''
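
# Usage sketch (return values are illustrative, not from a real page):
# get_video_info expects the page to expose a twitter:title meta tag plus a
# div.pornstarsList of <a class="label"> links, and returns (actress, series),
# e.g. ('Actress One & Actress Two', 'Cospuri'); it returns ('', '') when the
# title meta tag is missing.
#   actress, series = get_video_info(final_url)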
def format_info(info):
    response = requests.get(info)
    soup = BeautifulSoup(response.text, 'html.parser')
    if 'www.sakurajav.com' in info:
        actress, series = get_video_info(info)
        # Look up the thumbnail URL in the meta element with name="twitter:image"
        thumbnail = soup.find("meta", attrs={"name": "twitter:image"})
        if thumbnail:
            thumbnail_url = thumbnail['content']
            digit = get_digits(thumbnail_url)
            info = f"{series} {digit} - {actress}"
    else:
        # Take the tenth '%'-separated segment from the end and strip the
        # '3D1'/'3D' prefixes left over from URL encoding
        info = info.split('%')[-10].replace('3D1', '').replace('3D', '')
        # Regex pattern for a letter prefix followed by digits
        pattern = r'([a-z]+)(\d+)'
        # Search for matches of the pattern
        matches = re.findall(pattern, info, re.IGNORECASE)
        if matches:
            alfabet, number = matches[0]
            # Uppercase the letters, strip leading zeros, and pad the number to three digits
            number = number.lstrip('0').zfill(3)
            info = f"{alfabet.upper()}-{number}"
    return info
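
# Usage sketch (inputs are hypothetical): format_info has two branches. For a
# www.sakurajav.com URL it builds "<series> <digits> - <actress>" from the page
# metadata; for any other string it pulls a letter+digit code out of one of the
# '%'-separated segments and normalizes it.
#   format_info('...%3Dabc00123%...')  # -> 'ABC-123' (assuming the code lands
#   # in the tenth segment from the end)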