Renga committed on
Commit 3f9bd3f
1 Parent(s): b209590

publicearn1

Dockerfile ADDED
@@ -0,0 +1,34 @@
+ FROM python:3.10
+
+ # Update package lists and install necessary dependencies
+ # (unzip is installed here, before the apt lists are removed, so the
+ # ChromeDriver step below can use it)
+ RUN apt-get update && \
+     apt-get install -y wget gnupg ca-certificates unzip && \
+     rm -rf /var/lib/apt/lists/*
+
+ # Install Google Chrome
+ RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \
+     echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list && \
+     apt-get update && \
+     apt-get install -y google-chrome-stable && \
+     rm -rf /var/lib/apt/lists/*
+
+ # Install ChromeDriver (version must stay compatible with the installed Chrome)
+ RUN wget -q https://edgedl.me.gvt1.com/edgedl/chrome/chrome-for-testing/119.0.6045.105/linux64/chromedriver-linux64.zip && \
+     unzip chromedriver-linux64.zip && \
+     mv chromedriver-linux64/chromedriver /usr/local/bin/ && \
+     rm chromedriver-linux64.zip
+
+ RUN useradd -m -u 1000 user
+ RUN adduser user sudo
+ USER user
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+ WORKDIR $HOME/app
+
+ # Continue with your remaining commands
+ RUN pip install --no-cache-dir --upgrade pip
+ COPY --chown=user . $HOME/app
+
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ CMD python3 app.py & python3 main.py
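
Note: the Dockerfile installs from a requirements.txt that is not part of this commit view. A sketch inferred from the imports used across these files (package names are assumptions; the actual file may differ or pin versions; ddl, database and animepahe are local modules, not pip packages):

    flask
    flask-restful
    requests
    cloudscraper
    cfscrape
    beautifulsoup4
    lxml
    js2py
    curl-cffi
    python-dotenv
    pyrogram
    selenium
    chromedriver-autoinstaller
    lk21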
Hello_world.py ADDED
@@ -0,0 +1 @@
+ print('Hello')
animepahe/__pycache__/anime.cpython-310.pyc ADDED
Binary file (2 kB). View file
 
animepahe/__pycache__/kwik_token_extractor.cpython-310.pyc ADDED
Binary file (2.94 kB). View file
 
animepahe/anime.py ADDED
@@ -0,0 +1,76 @@
+ # from kwik_extractor import KwikExtractor
+ import cloudscraper
+ from bs4 import BeautifulSoup
+ from time import sleep
+ import re
+ session = cloudscraper.create_scraper()
+ from animepahe.kwik_token_extractor import kwik_token_extractor
+
+ def get_cookie_and_response(episode):
+     head = {
+         "referer": episode,
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36 Edg/80.0.361.69"
+     }
+     response = session.get(episode, headers=head)
+     cookie = []
+     try:
+         cookie.append(response.headers["set-cookie"])
+         cookie.append(response)
+     except Exception:
+         return None
+     return cookie
+
+ def set_token(response_text):
+     match = re.search(r'[\S]+",[\d]+,"[\S]+",[\d]+,[\d]+,[\d]+', response_text)
+     if match is None:  # page layout changed or the response was a challenge page
+         return None
+     parameters = match.group(0).split(",")
+     para1 = parameters[0].strip('"')
+     para2 = int(parameters[1])
+     para3 = parameters[2].strip('"')
+     para4 = int(parameters[3])
+     para5 = int(parameters[4])
+     para6 = int(parameters[5])
+
+     page_data = kwik_token_extractor.extract_data(para1, para2, para3, para4, para5, para6)
+     page_data = BeautifulSoup(page_data, "html.parser")
+
+     input_field = page_data.find("input", attrs={"name": "_token"})
+
+     # print(input_field)
+
+     if input_field is not None:
+         token = input_field["value"]
+         print(token)
+         return token
+
+     return None  # was `return False`; callers test `token is None`
+
+
+ def set_direct_link(episode):
+     cookie = get_cookie_and_response(episode)
+     if cookie is None:
+         sleep(2)
+         cookie = get_cookie_and_response(episode)
+
+     if cookie is None:
+         return False
+     token = set_token(cookie[1].text)
+     if token is None:
+         return False
+     head = {
+         "origin": "https://kwik.cx",
+         "referer": 'https://kwik.cx/f/' + episode.split('/')[-1],
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36 Edg/80.0.361.69",
+         "cookie": cookie[0]
+     }
+
+     payload = {
+         "_token": token
+     }
+
+     post_url = "https://kwik.cx/d/" + episode.split('/')[-1]
+     print(post_url)
+     resp_headers = session.post(post_url, data=payload, headers=head, allow_redirects=False)
+     return resp_headers.headers['location']
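
A minimal usage sketch for set_direct_link (the kwik.cx file id is hypothetical, and a live network plus an unchanged page layout are assumed):

    from animepahe.anime import set_direct_link

    link = set_direct_link('https://kwik.cx/f/AbCdEf123')  # hypothetical file id
    if link:
        print('direct download:', link)
    else:
        print('extraction failed')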
animepahe/kwik_token_extractor.py ADDED
@@ -0,0 +1,75 @@
+ from js2py.pyjs import *
+
+ # setting scope
+ var = Scope(JS_BUILTINS)
+ set_global_object(var)
+
+ # Code follows:
+ var.registers(['extract_data', '_0xe12c'])
+
+
+ @Js
+ def PyJsHoisted__0xe12c_(d, e, f, this, arguments, var=var):
+     var = Scope({'d': d, 'e': e, 'f': f, 'this': this, 'arguments': arguments}, var)
+     var.registers(['e', 'i', 'h', 'g', 'k', 'd', 'j', 'f'])
+     var.put('g', Js('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/').callprop('split', Js('')))
+     var.put('h', var.get('g').callprop('slice', Js(0.0), var.get('e')))
+     var.put('i', var.get('g').callprop('slice', Js(0.0), var.get('f')))
+
+     @Js
+     def PyJs_anonymous_0_(a, b, c, this, arguments, var=var):
+         var = Scope({'a': a, 'b': b, 'c': c, 'this': this, 'arguments': arguments}, var)
+         var.registers(['b', 'c', 'a'])
+         if PyJsStrictNeq(var.get('h').callprop('indexOf', var.get('b')), (-Js(1.0))):
+             return var.put('a', (var.get('h').callprop('indexOf', var.get('b')) * var.get('Math').callprop('pow', var.get('e'), var.get('c'))), '+')
+
+     PyJs_anonymous_0_._set_name('anonymous')
+     var.put('j', var.get('d').callprop('split', Js('')).callprop('reverse').callprop('reduce', PyJs_anonymous_0_, Js(0.0)))
+     var.put('k', Js(''))
+     while (var.get('j') > Js(0.0)):
+         var.put('k', (var.get('i').get((var.get('j') % var.get('f'))) + var.get('k')))
+         var.put('j', ((var.get('j') - (var.get('j') % var.get('f'))) / var.get('f')))
+     return (var.get('k') or Js('0'))
+
+
+ PyJsHoisted__0xe12c_.func_name = '_0xe12c'
+ var.put('_0xe12c', PyJsHoisted__0xe12c_)
+
+
+ @Js
+ def PyJsHoisted_extract_data_(h, u, n, t, e, r, this, arguments, var=var):
+     var = Scope({'h': h, 'u': u, 'n': n, 't': t, 'e': e, 'r': r, 'this': this, 'arguments': arguments}, var)
+     var.registers(['e', 'len', 'i', 'h', 'u', 'n', 'r', 's', 't', 'j'])
+     var.put('r', Js(''))
+     # for JS loop
+     var.put('i', Js(0.0))
+     var.put('len', var.get('h').get('length'))
+     while (var.get('i') < var.get('len')):
+         try:
+             var.put('s', Js(''))
+             while PyJsStrictNeq(var.get('h').get(var.get('i')), var.get('n').get(var.get('e'))):
+                 var.put('s', var.get('h').get(var.get('i')), '+')
+                 (var.put('i', Js(var.get('i').to_number()) + Js(1)) - Js(1))
+             # for JS loop
+             var.put('j', Js(0.0))
+             while (var.get('j') < var.get('n').get('length')):
+                 try:
+                     var.put('s', var.get('s').callprop('replace', var.get('RegExp').create(var.get('n').get(var.get('j')), Js('g')), var.get('j')))
+                 finally:
+                     (var.put('j', Js(var.get('j').to_number()) + Js(1)) - Js(1))
+             var.put('r', var.get('String').callprop('fromCharCode', (var.get('_0xe12c')(var.get('s'), var.get('e'), Js(10.0)) - var.get('t'))), '+')
+         finally:
+             (var.put('i', Js(var.get('i').to_number()) + Js(1)) - Js(1))
+     return var.get('decodeURIComponent')(var.get('escape')(var.get('r')))
+
+
+ PyJsHoisted_extract_data_.func_name = 'extract_data'
+ var.put('extract_data', PyJsHoisted_extract_data_)
+
+ kwik_token_extractor = var.to_python()
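
Because var.to_python() exposes the hoisted JS functions as attributes of the returned namespace, the decoder is invoked like an ordinary Python function; anime.py's set_token does exactly this with the six values it parses from the kwik page:

    page_data = kwik_token_extractor.extract_data(para1, para2, para3, para4, para5, para6)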
app.py ADDED
@@ -0,0 +1,173 @@
+ import os
+ from flask import Flask
+ from flask_restful import Resource, Api
+
+ app = Flask(__name__)
+ api = Api(app)
+
+ class Greeting(Resource):
+     def get(self):
+         return "Bot is Up & Running!"
+
+ api.add_resource(Greeting, '/')
+ app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 7860)))  # env values are strings; Werkzeug expects an int port
+
+
+ # from flask import Flask, request
+
+ # app = Flask(__name__)
+
+ # rawhtml = """
+ # <html>
+ # <head>
+ # <meta charset="utf-8">
+ # <meta name="viewport" content="width=device-width, initial-scale=1.0">
+ # <title>Play Videos via Links</title>
+ # <link rel="icon" href="https://i.slow.pics/vBpClAQI.webp">
+ # <link rel="stylesheet" href="https://cdn.plyr.io/3.7.3/plyr.css" />
+ # <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css" integrity="sha384-JcKb8q3iqJ61gNV9KGb8thSsNjpSL0n8PARn9HuZOnIxN0hoP+VmmDGMN5t9UJ0Z" crossorigin="anonymous" />
+ # <script src="https://code.jquery.com/jquery-3.5.1.slim.min.js" integrity="sha384-DfXdz2htPH0lsSSs5nCTpuj/zy4C+OGpamoFVy38MVBnE+IbbVYUew+OrCXaRkfj" crossorigin="anonymous"></script>
+ # <script src="https://cdn.jsdelivr.net/npm/popper.js@1.16.1/dist/umd/popper.min.js" integrity="sha384-9/reFTGAW83EW2RDu2S0VKaIzap3H66lZH81PoYlFhbGU+6BZp6G7niu735Sk7lN" crossorigin="anonymous"></script>
+ # <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/js/bootstrap.min.js" integrity="sha384-B4gt1jrGC7Jh4AgTPSdUtOBvfO8shuf57BaghqFfPlYxofvL8/KUEfYiJOMMV+rV" crossorigin="anonymous"></script>
+ # <style>
+ # .button-85 {
+ # padding: 0.6em 2em;
+ # border: none;
+ # outline: none;
+ # color: rgb(255, 255, 255);
+ # background: #111;
+ # cursor: pointer;
+ # position: relative;
+ # z-index: 0;
+ # border-radius: 10px;
+ # user-select: none;
+ # }
+
+ # .button-85:before {
+ # content: "";
+ # background: linear-gradient(
+ # 45deg,
+ # #ff0000,
+ # #ff7300,
+ # #fffb00,
+ # #48ff00,
+ # #00ffd5,
+ # #002bff,
+ # #7a00ff,
+ # #ff00c8,
+ # #ff0000
+ # );
+ # position: absolute;
+ # top: -2px;
+ # left: -2px;
+ # background-size: 400%;
+ # z-index: -1;
+ # filter: blur(5px);
+ # -webkit-filter: blur(5px);
+ # width: calc(100% + 4px);
+ # height: calc(100% + 4px);
+ # animation: glowing-button-85 20s linear infinite;
+ # transition: opacity 0.3s ease-in-out;
+ # border-radius: 10px;
+ # }
+
+ # @keyframes glowing-button-85 {
+ # 0% {
+ # background-position: 0 0;
+ # }
+ # 50% {
+ # background-position: 400% 0;
+ # }
+ # 100% {
+ # background-position: 0 0;
+ # }
+ # }
+
+ # .button-85:after {
+ # z-index: -1;
+ # content: "";
+ # position: absolute;
+ # width: 100%;
+ # height: 100%;
+ # background: #222;
+ # left: 0;
+ # top: 0;
+ # border-radius: 10px;
+ # }
+ # </style>
+ # </head>
+ # <body style = "background-color: black;">
+ # <div class = "container-fluid">
+ # <div class = "row">
+ # <div style="background-color: #19232d;" class = "col-12 fixed-top">
+ # <h1 class = "text-center p-3" style = "color: yellow;background-color: #19232d; font-weight: bold;">
+ # 𝚁𝚞𝚜𝚑𝚒𝚍𝚑𝚊𝚛
+ # </h1>
+ # </div>
+ # <div class = "col-12 p-5" style = "background-color: black;"></div>
+ # <div class = "col-12 mt-5 mb-5">
+ # <video id="player" playsinline controls data-poster="https://cdn.jsdelivr.net/npm/@googledrive/index@2.2.3/images/poster.jpg">
+ # <source src="{video_url}" type="video/mp4">
+ # <source src="{video_url}" type="video/webm">
+ # </video>
+ # <script src="https://cdn.plyr.io/3.7.3/plyr.js"></script>
+ # <script>const player = new Plyr('#player');</script>
+ # </div>
+ # <div class = "col-12 text-center">
+ # <button class="button-85 mb-3 mt-3" role="button">
+ # <a href="{video_url}" style = "text-decoration: none; color: yellow;font-weight: bold;font-size: 20px;">
+ # Download Video
+ # </a>
+ # </button>
+ # </div>
+ # <div class = "col-12 text-center">
+ # <button class="bg-primary pl-2 pr-2 pt-1 pb-1 m-2 shadow" style = "border-radius :50px; border-width: 0px; width: 220px; height: 60px;" width = "200px">
+ # <a href="intent:{video_url}#Intent;package=com.mxtech.videoplayer.ad;end">
+ # <img src="https://i.slow.pics/qeem0Xni.png" alt="mx_logo" width="150px">
+ # </a>
+ # </button>
+ # </div>
+ # <div class = "col-12 text-center">
+ # <button class="pl-2 pr-2 pt-1 pb-1 m-2" style = "border-radius :50px; border-width: 0px; width: 220px; height: 60px; background-color: #7400FF;">
+ # <a href="intent:{video_url}#Intent;package=org.videolan.vlc;end">
+ # <img src="https://i.slow.pics/lP6ygVnl.png" alt="vlc_logo" width="100px" height="40px"/>
+ # </a>
+ # </button>
+ # </div>
+ # <div class = "col-12 text-center">
+ # <button class="pl-2 pr-2 pt-1 pb-1 m-2" style = "border-radius :50px; border-width: 0px; background-color: lightblue; width: 220px; height: 60px;">
+ # <a href="intent:{video_url}#Intent;action=com.young.simple.player.playback_online;package=com.young.simple.player;end">
+ # <img src="https://i.slow.pics/ZKTgV9HV.png" alt="s_player_logo" width="170px">
+ # </a>
+ # </button>
+ # </div>
+ # <div class = "col-12 text-center">
+ # <button class="bg-warning pl-2 pr-2 pt-1 pb-1 m-2" style = "border-radius :50px; border-width: 0px; width: 220px; height: 60px;">
+ # <a href="intent:{video_url}#Intent;package=com.playit.videoplayer;end">
+ # <img src="https://i.slow.pics/c0C4vnAa.png" alt="playit_logo" width="170px">
+ # </a>
+ # </button>
+ # </div>
+ # <div class = "col-12 p-5" style = "background-color: black;"></div>
+ # <div style="background-color: #19232d;" class = "col-12 p-2 fixed-bottom">
+ # <p class = "text-center" style = "color: white; font-size: 20px;">
+ # Made with plyr
+ # </p>
+ # </div>
+ # </div>
+ # </div>
+ # </body>
+ # </html>
+ # """
+
+ # @app.route('/stream')
+ # def player():
+ #     vid_url = request.args.get('url')
+ #     return rawhtml.replace("{video_url}", f"{vid_url}")
+
+ # @app.route('/')
+ # def hello_world():
+ #     return "<h1>Site Running</h1>"
+
+ # if __name__ == "__main__":
+ #     app.run(host="0.0.0.0", port=os.environ.get("PORT", 7860))
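
With the service above listening on PORT (default 7860), a quick liveness check might look like this (the localhost URL assumes a local run; flask-restful serializes the returned string as JSON):

    import requests

    print(requests.get('http://localhost:7860/').json())  # -> 'Bot is Up & Running!'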
broadcast_helper.py ADDED
@@ -0,0 +1,39 @@
+ # (c) adarsh-goel
+
+ import asyncio
+ import traceback
+ from pyrogram.errors import FloodWait, InputUserDeactivated, UserIsBlocked, PeerIdInvalid
+ import logging
+ import datetime
+
+ from database import Database
+ db_url = "mongodb+srv://herukotest:herukotest@test.trmvd8p.mongodb.net/?retryWrites=true&w=majority"
+ #db_url = "mongodb+srv://testfiletolink:testfiletolink@file.k0gf5py.mongodb.net/?retryWrites=true&w=majority"
+ name = 'Bypass'
+ db = Database(db_url, name)
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.INFO)
+
+ #info=main.UPDATES_CHANNEL
+
+
+ async def broadcast_messages(user_id, message):
+     try:
+         await message.copy(chat_id=user_id)
+         return True, "Success"
+     except FloodWait as e:
+         await asyncio.sleep(e.x)  # pyrogram v2 renamed this attribute to e.value
+         return await broadcast_messages(user_id, message)
+     except InputUserDeactivated:
+         await db.delete_user(int(user_id))
+         logging.info(f"{user_id}-Removed from Database, since deleted account.")
+         return False, "Deleted"
+     except UserIsBlocked:
+         logging.info(f"{user_id} -Blocked the bot.")
+         return False, "Blocked"
+     except PeerIdInvalid:
+         await db.delete_user(int(user_id))
+         logging.info(f"{user_id} - PeerIdInvalid")
+         return False, "Error"
+     except Exception:
+         return False, "Error"
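
A sketch of how broadcast_messages might be driven for all known users (db.get_all_users and its async-cursor shape are assumptions about the Database class, which is not part of this commit view):

    async def broadcast_all(message):
        users = await db.get_all_users()          # assumed Database API
        async for user in users:                  # assumed async cursor of {'id': ...} docs
            ok, status = await broadcast_messages(user['id'], message)
            if not ok:
                logger.info(f"{user['id']}: {status}")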
bypasser.py ADDED
@@ -0,0 +1,2022 @@
+ import re
+ from re import match as rematch, findall, sub as resub, compile as recompile
+ from re import match, search
+ import requests
+ from requests import get as rget
+ import base64
+ from urllib.parse import unquote, urlparse, parse_qs, quote
+ import time
+ import cloudscraper
+ #from cloudscraper import create_scraper
+ from bs4 import BeautifulSoup, NavigableString, Tag
+ from lxml import etree
+ from curl_cffi import requests as Nreq
+ from curl_cffi.requests import Session as cSession
+ import hashlib
+ import json
+ from dotenv import load_dotenv
+ load_dotenv()
+ from asyncio import sleep as asleep, create_task, gather
+ import os
+ import ddl
+ from cfscrape import create_scraper
+ from uuid import uuid4
+ from requests import Session
+ from ddl import humanbytes
+ from animepahe.anime import set_direct_link
+ from lk21 import Bypass
+ from base64 import standard_b64encode, b64decode  # b64decode is needed by gdtot() below
+ from selenium import webdriver
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as ec
+ from selenium.webdriver.support.ui import WebDriverWait
+ import chromedriver_autoinstaller
+ ##########################################################
+ # ENVs
+
+ GDTOT_CRYPT = "b0lDek5LSCt6ZjVRR2EwZnY4T1EvVndqeDRtbCtTWmMwcGNuKy8wYWpDaz0%3D"
+ Laravel_Session = os.environ.get("Laravel_Session", "")
+ XSRF_TOKEN = os.environ.get("XSRF_TOKEN", "")
+ DCRYPT = os.environ.get("DRIVEFIRE_CRYPT", "cnhXOGVQNVlpeFZlM2lvTmN6Z2FPVWJiSjVBbWdVN0dWOEpvR3hHbHFLVT0%3D")
+ KCRYPT = os.environ.get("KOLOP_CRYPT", "a1V1ZWllTnNNNEZtbkU4Y0RVd3pkRG5UREFJZFlUaC9GRko5NUNpTHNFcz0%3D")
+ HCRYPT = os.environ.get("HUBDRIVE_CRYPT", "UDJvaFFVQjhlUThTN1I4elNSdkJGem1WUVZYUXFvS3FNRWlCeEM1clhnVT0%3D")
+ KATCRYPT = os.environ.get("KATDRIVE_CRYPT", "bzQySHVKSkY0bEczZHlqOWRsSHZCazBkOGFDak9HWXc1emRTL1F6Rm9ubz0%3D")
+ uid = os.environ.get('PEUID')
+ print(uid)
+
+ class DDLException(Exception):
+     """No method found for extracting a direct download link from the http link"""
+     pass
+
+ ############################################################
+ # Lists
+
+ otherslist = ["exe.io", "exey.io", "sub2unlock.net", "sub2unlock.com", "rekonise.com", "letsboost.net", "ph.apps2app.com", "mboost.me",
+               "sub4unlock.com", "ytsubme.com", "social-unlock.com", "boost.ink", "goo.gl", "shrto.ml"]
+
+ gdlist = ["appdrive", "driveapp", "drivehub", "gdflix", "drivesharer", "drivebit", "drivelinks", "driveace",
+           "drivepro", "driveseed"]
+
+ DDL_REGEX = recompile(r"DDL\(([^),]+)\, (([^),]+)), (([^),]+)), (([^),]+))\)")
+
+ POST_ID_REGEX = recompile(r'"postId":"(\d+)"')
+
+ ###############################################################
+
+ ###############################################################
+ # index scrapper
+
+ def scrapeIndex(url, username="none", password="none"):
+
+     def authorization_token(username, password):
+         user_pass = f"{username}:{password}"
+         return f"Basic {base64.b64encode(user_pass.encode()).decode()}"
+
+     def decrypt(string):
+         return base64.b64decode(string[::-1][24:-20]).decode('utf-8')
+
+     def func(payload_input, url, username, password):
+         next_page = False
+         next_page_token = ""
+
+         url = f"{url}/" if url[-1] != '/' else url
+
+         try: headers = {"authorization": authorization_token(username, password)}
+         except: return "username/password combination is wrong", None, None
+
+         encrypted_response = requests.post(url, data=payload_input, headers=headers)
+         if encrypted_response.status_code == 401: return "username/password combination is wrong", None, None
+
+         try: decrypted_response = json.loads(decrypt(encrypted_response.text))
+         except: return "something went wrong. check index link/username/password field again", None, None
+
+         page_token = decrypted_response["nextPageToken"]
+         if page_token is None:
+             next_page = False
+         else:
+             next_page = True
+             next_page_token = page_token
+
+         if list(decrypted_response.get("data").keys())[0] != "error":
+             file_length = len(decrypted_response["data"]["files"])
+             result = ""
+
+             for i, _ in enumerate(range(file_length)):
+                 files_type = decrypted_response["data"]["files"][i]["mimeType"]
+                 if files_type != "application/vnd.google-apps.folder":
+                     files_name = decrypted_response["data"]["files"][i]["name"]
+
+                     direct_download_link = url + quote(files_name)
+                     result += f"• {files_name} :\n{direct_download_link}\n\n"
+             return result, next_page, next_page_token
+         return None, False, ""  # index answered with an error payload; keep the 3-tuple shape
+
+     def format(result):
+         long_string = ''.join(result)
+         new_list = []
+
+         while len(long_string) > 0:
+             if len(long_string) > 4000:
+                 split_index = long_string.rfind("\n\n", 0, 4000)
+                 if split_index == -1:
+                     split_index = 4000
+             else:
+                 split_index = len(long_string)
+
+             new_list.append(long_string[:split_index])
+             long_string = long_string[split_index:].lstrip("\n\n")
+
+         return new_list
+
+     # main
+     x = 0
+     next_page = False
+     next_page_token = ""
+     result = []
+
+     payload = {"page_token": next_page_token, "page_index": x}
+     print(f"Index Link: {url}\n")
+     temp, next_page, next_page_token = func(payload, url, username, password)
+     if temp is not None: result.append(temp)
+
+     while next_page:
+         payload = {"page_token": next_page_token, "page_index": x}
+         temp, next_page, next_page_token = func(payload, url, username, password)
+         if temp is not None: result.append(temp)
+         x += 1
+
+     if len(result) == 0: return None
+     return format(result)
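
Usage sketch for scrapeIndex (the index URL is a placeholder; credentials are only needed for protected indexes):

    pages = scrapeIndex('https://example-index.workers.dev/0:', 'user', 'pass')  # placeholder URL/creds
    if pages:
        for chunk in pages:   # output comes pre-split into ~4000-character chunks
            print(chunk)
    else:
        print('no files found')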
+
+
+ ##############################################################
+ # shorteners
+
+ def gofile_dl(url: str):
+     sess = requests.Session()  # was named `rget`, which shadowed the module-level import
+     resp = sess.get('https://api.gofile.io/createAccount')
+     if resp.status_code == 200:
+         data = resp.json()
+         if data['status'] == 'ok' and data.get('data', {}).get('token', None):
+             token = data['data']['token']
+         else:
+             return 'ERROR: Failed to Create GoFile Account'
+     else:
+         return 'ERROR: GoFile Server Response Failed'
+     headers = f'Cookie: accountToken={token}'
+
+     def getNextedFolder(contentId, path):
+         params = {'contentId': contentId, 'token': token, 'websiteToken': '7fd94ds12fds4'}
+         res = sess.get('https://api.gofile.io/getContent', params=params)
+         if res.status_code == 200:
+             json_data = res.json()
+             if json_data['status'] == 'ok':
+                 links = {}
+                 for content in json_data['data']['contents'].values():
+                     if content["type"] == "folder":
+                         path = path + "/" + content['name']
+                         links.update(getNextedFolder(content['id'], path))
+                     elif content["type"] == "file":
+                         links[content['link']] = path
+                 return links
+             else:
+                 return 'ERROR: Failed to Receive All Files List'
+         else:
+             return 'ERROR: GoFile Server Response Failed'
+
+     return list([getNextedFolder(url[url.rfind('/')+1:], ""), headers][0].keys())[0]
+
+
+ #################################################
+ # drivefire
+
+ def parse_info_drivefire(res):
+     info_parsed = {}
+     title = re.findall(r'>(.*?)</h4>', res.text)[0]
+     info_chunks = re.findall(r'>(.*?)</td>', res.text)
+     info_parsed['title'] = title
+     for i in range(0, len(info_chunks), 2):
+         info_parsed[info_chunks[i]] = info_chunks[i+1]
+     return info_parsed
+
+ def drivefire_dl(url, dcrypt):
+     client = requests.Session()
+     client.cookies.update({'crypt': dcrypt})
+
+     res = client.get(url)
+     info_parsed = parse_info_drivefire(res)
+     info_parsed['error'] = False
+
+     up = urlparse(url)
+     req_url = f"{up.scheme}://{up.netloc}/ajax.php?ajax=download"
+
+     file_id = url.split('/')[-1]
+     data = {'id': file_id}
+     headers = {'x-requested-with': 'XMLHttpRequest'}
+
+     try:
+         res = client.post(req_url, headers=headers, data=data).json()['file']
+     except:
+         return "Error"  # {'error': True, 'src_url': url}
+
+     decoded_id = res.rsplit('/', 1)[-1]
+     info_parsed = f"https://drive.google.com/file/d/{decoded_id}"
+     return info_parsed
+
+
+ ##################################################
+ # kolop
+
+ def parse_info_kolop(res):
+     info_parsed = {}
+     title = re.findall(r'>(.*?)</h4>', res.text)[0]
+     info_chunks = re.findall(r'>(.*?)</td>', res.text)
+     info_parsed['title'] = title
+     for i in range(0, len(info_chunks), 2):
+         info_parsed[info_chunks[i]] = info_chunks[i+1]
+     return info_parsed
+
+ def kolop_dl(url, kcrypt):
+     client = requests.Session()
+     client.cookies.update({'crypt': kcrypt})
+
+     res = client.get(url)
+     info_parsed = parse_info_kolop(res)
+     info_parsed['error'] = False
+
+     up = urlparse(url)
+     req_url = f"{up.scheme}://{up.netloc}/ajax.php?ajax=download"
+
+     file_id = url.split('/')[-1]
+     data = {'id': file_id}
+     headers = {'x-requested-with': 'XMLHttpRequest'}
+
+     try:
+         res = client.post(req_url, headers=headers, data=data).json()['file']
+     except:
+         return "Error"  # {'error': True, 'src_url': url}
+
+     gd_id = re.findall('gd=(.*)', res, re.DOTALL)[0]
+     info_parsed['gdrive_url'] = f"https://drive.google.com/open?id={gd_id}"
+     info_parsed['src_url'] = url
+
+     return info_parsed['gdrive_url']
+
+
+ ##################################################
+ # mediafire
+
+ def mediafire(url):
+     res = requests.get(url, stream=True)
+     contents = res.text
+
+     for line in contents.splitlines():
+         m = re.search(r'href="((http|https)://download[^"]+)', line)
+         if m:
+             return m.groups()[0]
+
+
+ ####################################################
+ # zippyshare
+
+ def zippyshare(url):
+     resp = requests.get(url).text
+     surl = resp.split("document.getElementById('dlbutton').href = ")[1].split(";")[0]
+     parts = surl.split("(")[1].split(")")[0].split(" ")
+     val = str(int(parts[0]) % int(parts[2]) + int(parts[4]) % int(parts[6]))
+     surl = surl.split('"')
+     burl = url.split("zippyshare.com")[0]
+     furl = burl + "zippyshare.com" + surl[1] + val + surl[-2]
+     return furl
+
+
+ ####################################################
+ # filecrypt
+
+ def getlinks(dlc, client):
+     headers = {
+         'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0',
+         'Accept': 'application/json, text/javascript, */*',
+         'Accept-Language': 'en-US,en;q=0.5',
+         'X-Requested-With': 'XMLHttpRequest',
+         'Origin': 'http://dcrypt.it',
+         'Connection': 'keep-alive',
+         'Referer': 'http://dcrypt.it/',
+     }
+
+     data = {
+         'content': dlc,
+     }
+
+     response = client.post('http://dcrypt.it/decrypt/paste', headers=headers, data=data).json()["success"]["links"]
+     links = ""
+     for link in response:
+         links = links + link + "\n"
+     return links[:-1]
+
+
+ def filecrypt(url):
+     client = cloudscraper.create_scraper(allow_brotli=False)
+     headers = {
+         "authority": "filecrypt.co",
+         "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
+         "accept-language": "en-US,en;q=0.9",
+         "cache-control": "max-age=0",
+         "content-type": "application/x-www-form-urlencoded",
+         "dnt": "1",
+         "origin": "https://filecrypt.co",
+         "referer": url,
+         "sec-ch-ua": '"Google Chrome";v="105", "Not)A;Brand";v="8", "Chromium";v="105"',
+         "sec-ch-ua-mobile": "?0",
+         "sec-ch-ua-platform": "Windows",
+         "sec-fetch-dest": "document",
+         "sec-fetch-mode": "navigate",
+         "sec-fetch-site": "same-origin",
+         "sec-fetch-user": "?1",
+         "upgrade-insecure-requests": "1",
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36"
+     }
+
+     resp = client.get(url, headers=headers)
+     soup = BeautifulSoup(resp.content, "html.parser")
+
+     dlclink = None  # avoids a NameError when no DLC button is found
+     buttons = soup.find_all("button")
+     for ele in buttons:
+         line = ele.get("onclick")
+         if line is not None and "DownloadDLC" in line:
+             dlclink = "https://filecrypt.co/DLC/" + line.split("DownloadDLC('")[1].split("'")[0] + ".html"
+             break
+
+     resp = client.get(dlclink, headers=headers)
+     return getlinks(resp.text, client)
+
+
+ #####################################################
+ # dropbox
+
+ def dropbox(url):
+     return url.replace("www.", "").replace("dropbox.com", "dl.dropboxusercontent.com").replace("?dl=0", "")
+
+
+ ######################################################
+ # shareus
+
+ def shareus(url):
+     headers = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'}
+     DOMAIN = "https://us-central1-my-apps-server.cloudfunctions.net"
+     sess = requests.session()
+
+     code = url.split("/")[-1]
+     params = {'shortid': code, 'initial': 'true', 'referrer': 'https://shareus.io/'}
+     response = requests.get(f'{DOMAIN}/v', params=params, headers=headers)
+
+     for i in range(1, 4):
+         json_data = {'current_page': i}
+         response = sess.post(f'{DOMAIN}/v', headers=headers, json=json_data)
+
+     response = sess.get(f'{DOMAIN}/get_link', headers=headers).json()
+     return response["link_info"]["destination"]
+
+ #######################################################
+ # anonfiles
+
+ def anonfile(url):
+     headersList = {"Accept": "*/*"}
+     payload = ""
+
+     response = requests.request("GET", url, data=payload, headers=headersList).text.split("\n")
+     for ele in response:
+         if "https://cdn" in ele and "anonfiles.com" in ele and url.split("/")[-2] in ele:
+             break
+
+     return ele.split('href="')[1].split('"')[0]
+
+
+ ##########################################################
+ # pixl
+
+ def pixl(url):
+     count = 1
+     dl_msg = ""
+     currentpage = 1
+     settotalimgs = True
+     totalimages = ""
+     client = cloudscraper.create_scraper(allow_brotli=False)
+     resp = client.get(url)
+     if resp.status_code == 404:
+         return "File not found/The link you entered is wrong!"
+     soup = BeautifulSoup(resp.content, "html.parser")
+     if "album" in url and settotalimgs:
+         totalimages = soup.find("span", {"data-text": "image-count"}).text
+         settotalimgs = False
+     thmbnailanch = soup.findAll(attrs={"class": "--media"})
+     links = soup.findAll(attrs={"data-pagination": "next"})
+     try:
+         url = links[0].attrs["href"]
+     except BaseException:
+         url = None
+     for ref in thmbnailanch:
+         imgdata = client.get(ref.attrs["href"])
+         if not imgdata.status_code == 200:
+             time.sleep(5)
+             continue
+         imghtml = BeautifulSoup(imgdata.text, "html.parser")
+         downloadanch = imghtml.find(attrs={"class": "btn-download"})
+         currentimg = downloadanch.attrs["href"]
+         currentimg = currentimg.replace(" ", "%20")
+         dl_msg += f"{count}. {currentimg}\n"
+         count += 1
+     currentpage += 1
+     fld_msg = f"Your provided Pixl.is link is of a Folder and I've found {count - 1} files in the folder.\n"
+     fld_link = f"\nFolder Link: {url}\n"
+     final_msg = fld_link + "\n" + fld_msg + "\n" + dl_msg
+     return final_msg
+
+
+ ############################################################
+ # sirigan ( unused )
+
+ def siriganbypass(url):
+     client = requests.Session()
+     res = client.get(url)
+     url = res.url.split('=', maxsplit=1)[-1]
+
+     while True:
+         try: url = base64.b64decode(url).decode('utf-8')
+         except: break
+
+     return url.split('url=')[-1]
+
+
+ ############################################################
+ # shorte
+
+ def sh_st_bypass(url):
+     client = requests.Session()
+     client.headers.update({'referer': url})
+     p = urlparse(url)
+
+     res = client.get(url)
+
+     sess_id = re.findall(r'''sessionId(?:\s+)?:(?:\s+)?['|"](.*?)['|"]''', res.text)[0]
+
+     final_url = f"{p.scheme}://{p.netloc}/shortest-url/end-adsession"
+     params = {
+         'adSessionId': sess_id,
+         'callback': '_'
+     }
+     time.sleep(5)  # !important
+
+     res = client.get(final_url, params=params)
+     dest_url = re.findall('"(.*?)"', res.text)[1].replace(r'\/', '/')
+
+     return {
+         'src': url,
+         'dst': dest_url
+     }['dst']
+
+
+ #############################################################
+ # sharer.pw
+
+ def parse_info_sharer(res):
+     f = re.findall(r">(.*?)</td>", res.text)
+     info_parsed = {}
+     for i in range(0, len(f), 3):
+         info_parsed[f[i].lower().replace(' ', '_')] = f[i+2]
+     return info_parsed
+
+ def sharer_pw(url, Laravel_Session, XSRF_TOKEN, forced_login=False):
+     client = cloudscraper.create_scraper(allow_brotli=False)
+     client.cookies.update({
+         "XSRF-TOKEN": XSRF_TOKEN,
+         "laravel_session": Laravel_Session
+     })
+     res = client.get(url)
+     token = re.findall(r"_token\s=\s'(.*?)'", res.text, re.DOTALL)[0]
+     ddl_btn = etree.HTML(res.content).xpath("//button[@id='btndirect']")
+     info_parsed = parse_info_sharer(res)
+     info_parsed['error'] = True
+     info_parsed['src_url'] = url
+     info_parsed['link_type'] = 'login'
+     info_parsed['forced_login'] = forced_login
+     headers = {
+         'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
+         'x-requested-with': 'XMLHttpRequest'
+     }
+     data = {
+         '_token': token
+     }
+     if len(ddl_btn):
+         info_parsed['link_type'] = 'direct'
+     if not forced_login:
+         data['nl'] = 1
+     try:
+         res = client.post(url + '/dl', headers=headers, data=data).json()
+     except:
+         return info_parsed
+     if 'url' in res and res['url']:
+         info_parsed['error'] = False
+         info_parsed['gdrive_link'] = res['url']
+     if len(ddl_btn) and not forced_login and 'url' not in info_parsed:
+         # retry download via login
+         return sharer_pw(url, Laravel_Session, XSRF_TOKEN, forced_login=True)
+     return info_parsed.get("gdrive_link", "Unable to extract Drive link!")  # avoid a KeyError on failure
+
+
+ #################################################################
+ # gdtot
+
+ def gdtot(url):
+     cget = cloudscraper.create_scraper(allow_brotli=False)
+     try:
+         url = cget.get(url).url
+         p_url = urlparse(url)
+         res = cget.post(f"{p_url.scheme}://{p_url.hostname}/ddl", data={'dl': str(url.split('/')[-1])})
+     except Exception as e:
+         return f'{e.__class__.__name__}'
+     if (drive_link := findall(r"myDl\('(.*?)'\)", res.text)) and "drive.google.com" in drive_link[0]:
+         d_link = drive_link[0]
+     elif GDTOT_CRYPT:
+         cget.get(url, cookies={'crypt': GDTOT_CRYPT})
+         p_url = urlparse(url)
+         js_script = cget.post(f"{p_url.scheme}://{p_url.hostname}/dld", data={'dwnld': url.split('/')[-1]})
+         g_id = findall('gd=(.*?)&', js_script.text)
+         try:
+             decoded_id = b64decode(str(g_id[0])).decode('utf-8')
+         except:
+             return "Try in your browser, mostly file not found or user limit exceeded!"
+         d_link = f'https://drive.google.com/open?id={decoded_id}'
+         print(f'2. {d_link}')
+     else:
+         return 'Drive Link not found, try in your browser! GDTOT_CRYPT not provided!'
+     soup = BeautifulSoup(cget.get(url).content, "html.parser")
+     parse_data = (soup.select('meta[property^="og:description"]')[0]['content']).replace('Download ', '').rsplit('-', maxsplit=1)
+     parse_txt = f'''┎ <b>Name :</b> <i>{parse_data[0]}</i>
+ ┠ <b>Size :</b> <i>{parse_data[-1]}</i>
+
+ ┠ <b>GDToT Link :</b> {url}
+ '''
+     try:
+         res = cget.get(url)
+         if (tele_link := findall(r"myDl2\('(.*?)'\)", res.text)):
+             print(tele_link[0])
+             parse_txt += f"┖ <b>Telegram Link :</b> {tele_link[0]}\n"
+     except: pass
+     parse_txt += f'┠ <b>Index Link :</b> https://indexlink.mrprincebotz.workers.dev/direct.aspx?id={get_gdriveid(d_link)}\n'
+     parse_txt += f"┖ <b>Drive Link :</b> {d_link}"
+     return parse_txt
+
+ ##################################################################
+ # adfly
+
+ def decrypt_url(code):
+     a, b = '', ''
+     for i in range(0, len(code)):
+         if i % 2 == 0: a += code[i]
+         else: b = code[i] + b
+     key = list(a + b)
+     i = 0
+     while i < len(key):
+         if key[i].isdigit():
+             for j in range(i+1, len(key)):
+                 if key[j].isdigit():
+                     u = int(key[i]) ^ int(key[j])
+                     if u < 10: key[i] = str(u)
+                     i = j
+                     break
+         i += 1
+     key = ''.join(key)
+     decrypted = base64.b64decode(key)[16:-16]
+     return decrypted.decode('utf-8')
+
+
+ def adfly(url):
+     client = cloudscraper.create_scraper(allow_brotli=False)
+     res = client.get(url).text
+     out = {'error': False, 'src_url': url}
+     try:
+         ysmm = re.findall(r"ysmm\s+=\s+['|\"](.*?)['|\"]", res)[0]
+     except:
+         out['error'] = True
+         return out
+     url = decrypt_url(ysmm)
+     if re.search(r'go\.php\?u\=', url):
+         url = base64.b64decode(re.sub(r'(.*?)u=', '', url)).decode()
+     elif '&dest=' in url:
+         url = unquote(re.sub(r'(.*?)dest=', '', url))
+     out['bypassed_url'] = url
+     return out
+
+
+ ##############################################################################################
+
+
+ ######################################################################################################
+ # droplink
+
+ def droplink(url):
+     client = cloudscraper.create_scraper(allow_brotli=False)
+     res = client.get(url, timeout=5)
+
+     ref = re.findall(r"action[ ]{0,}=[ ]{0,}['|\"](.*?)['|\"]", res.text)[0]
+     h = {"referer": ref}
+     res = client.get(url, headers=h)
+
+     bs4 = BeautifulSoup(res.content, "html.parser")
+     inputs = bs4.find_all("input")
+     data = {inp.get("name"): inp.get("value") for inp in inputs}
+     h = {
+         "content-type": "application/x-www-form-urlencoded",
+         "x-requested-with": "XMLHttpRequest",
+     }
+
+     p = urlparse(url)
+     final_url = f"{p.scheme}://{p.netloc}/links/go"
+     time.sleep(3.1)
+     res = client.post(final_url, data=data, headers=h).json()
+
+     if res["status"] == "success": return res["url"]
+     return 'Something went wrong :('
+
+
+ #####################################################################################################################
+ # linkvertise
+
+ def linkvertise(url):
+     params = {'url': url}
+     response = requests.get('https://bypass.pm/bypass2', params=params).json()
+     if response["success"]: return response["destination"]
+     else: return response["msg"]
+
+
+ ###################################################################################################################
+ # others
+
+ def others(url):
+     return "API Currently not Available"
+
+
+ #################################################################################################################
+ # ouo
+
+ # RECAPTCHA v3 BYPASS
+ # code from https://github.com/xcscxr/Recaptcha-v3-bypass
+ def recaptchaV3(ANCHOR_URL='https://www.google.com/recaptcha/api2/anchor?ar=1&k=6Lcr1ncUAAAAAH3cghg6cOTPGARa8adOf-y9zv2x&co=aHR0cHM6Ly9vdW8ucHJlc3M6NDQz&hl=en&v=pCoGBhjs9s8EhFOHJFe8cqis&size=invisible&cb=ahgyd1gkfkhe'):
+     rs = Session()
+     rs.headers.update({'content-type': 'application/x-www-form-urlencoded'})
+     matches = findall(r'([api2|enterprise]+)\/anchor\?(.*)', ANCHOR_URL)[0]
+     url_base = 'https://www.google.com/recaptcha/' + matches[0] + '/'
+     params = matches[1]
+     res = rs.get(url_base + 'anchor', params=params)
+     token = findall(r'"recaptcha-token" value="(.*?)"', res.text)[0]
+     params = dict(pair.split('=') for pair in params.split('&'))
+     res = rs.post(url_base + 'reload', params=f'k={params["k"]}', data=f"v={params['v']}&reason=q&c={token}&k={params['k']}&co={params['co']}")
+     return findall(r'"rresp","(.*?)"', res.text)[0]
+
+ def ouo(url: str):
+     tempurl = url.replace("ouo.press", "ouo.io")
+     p = urlparse(tempurl)
+     id = tempurl.split('/')[-1]
+     client = cSession(headers={'authority': 'ouo.io', 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8', 'cache-control': 'max-age=0', 'referer': 'http://www.google.com/ig/adde?moduleurl=', 'upgrade-insecure-requests': '1'})
+     res = client.get(tempurl, impersonate="chrome110")
+     next_url = f"{p.scheme}://{p.hostname}/go/{id}"
+
+     for _ in range(2):
+         if res.headers.get('Location'):
+             break
+         bs4 = BeautifulSoup(res.content, 'lxml')
+         inputs = bs4.form.findAll("input", {"name": recompile(r"token$")})  # was the bare builtin `compile`
+         data = {inp.get('name'): inp.get('value') for inp in inputs}
+         data['x-token'] = recaptchaV3()
+         res = client.post(next_url, data=data, headers={'content-type': 'application/x-www-form-urlencoded'}, allow_redirects=False, impersonate="chrome110")
+         next_url = f"{p.scheme}://{p.hostname}/xreallcygo/{id}"
+
+     return res.headers.get('Location')
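
Usage sketch (the short-link id is hypothetical; this path depends on the reCAPTCHA bypass above and can break whenever ouo changes its flow):

    dest = ouo('https://ouo.io/AbCd12')   # hypothetical id
    print(dest or 'bypass failed')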
+
+
+ ####################################################################################################################
+ # mdisk
+
+ def mdisk(url):
+     header = {
+         'Accept': '*/*',
+         'Accept-Language': 'en-US,en;q=0.5',
+         'Accept-Encoding': 'gzip, deflate, br',
+         'Referer': 'https://mdisk.me/',
+         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'
+     }
+
+     inp = url
+     fxl = inp.split("/")
+     cid = fxl[-1]
+
+     URL = f'https://diskuploader.entertainvideo.com/v1/file/cdnurl?param={cid}'
+     res = requests.get(url=URL, headers=header).json()
+     return res['download'] + '\n\n' + res['source']
+
+
+ ##################################################################################################################
+ # AppDrive or DriveApp etc. look-alike links, plus the account details (required for login-required links only)
+
+ def unified(url):
+     if ddl.is_share_link(url):
+         if 'https://gdtot' in url: return ddl.gdtot(url)
+         else: return ddl.sharer_scraper(url)
+
+     try:
+         Email = "chzeesha4@gmail.com"
+         Password = "zeeshi#789"
+
+         account = {"email": Email, "passwd": Password}
+         client = cloudscraper.create_scraper(allow_brotli=False)
+         client.headers.update(
+             {
+                 "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36"
+             }
+         )
+         data = {"email": account["email"], "password": account["passwd"]}
+         client.post(f"https://{urlparse(url).netloc}/login", data=data)
+         res = client.get(url)
+         key = re.findall(r'"key",\s+"(.*?)"', res.text)[0]
+         ddl_btn = etree.HTML(res.content).xpath("//button[@id='drc']")
+         info = re.findall(r">(.*?)</li>", res.text)
+         info_parsed = {}
+         for item in info:
+             kv = [s.strip() for s in item.split(":", maxsplit=1)]
+             info_parsed[kv[0].lower()] = kv[1]
+         info_parsed["error"] = False
+         info_parsed["link_type"] = "login"
+         headers = {
+             "Content-Type": f"multipart/form-data; boundary={'-'*4}_",
+         }
+         data = {"type": 1, "key": key, "action": "original"}
+         if len(ddl_btn):
+             info_parsed["link_type"] = "direct"
+             data["action"] = "direct"
+         response = {}  # fallback if every POST attempt below fails
+         while data["type"] <= 3:
+             boundary = f'{"-"*6}_'
+             data_string = ""
+             for item in data:
+                 data_string += f"{boundary}\r\n"
+                 data_string += f'Content-Disposition: form-data; name="{item}"\r\n\r\n{data[item]}\r\n'
+             data_string += f"{boundary}--\r\n"
+             gen_payload = data_string
+             try:
+                 response = client.post(url, data=gen_payload, headers=headers).json()
+                 break
+             except BaseException:
+                 data["type"] += 1
+         if "url" in response:
+             info_parsed["gdrive_link"] = response["url"]
+         elif "error" in response and response["error"]:
+             info_parsed["error"] = True
+             info_parsed["error_message"] = response["message"]
+         else:
+             info_parsed["error"] = True
+             info_parsed["error_message"] = "Something went wrong :("
+         if info_parsed["error"]:
+             return info_parsed
+         # each of these look-alike hosts hides the real Drive link behind one more page;
+         # the per-host blocks were identical, so they are collapsed into a single loop
+         for host in ("driveapp", "drivehub", "gdflix", "drivesharer", "drivebit",
+                      "drivelinks", "driveace", "drivepro"):
+             if host in urlparse(url).netloc:
+                 res = client.get(info_parsed["gdrive_link"])
+                 drive_link = etree.HTML(res.content).xpath(
+                     "//a[contains(@class,'btn')]/@href"
+                 )[0]
+                 info_parsed["gdrive_link"] = drive_link
+                 break
+         info_parsed["src_url"] = url
+         return info_parsed["gdrive_link"]
+     except BaseException:
+         return "Unable to Extract GDrive Link"
+
+
+ #####################################################################################################
+ # urls open
+
+ def vnshortener(url):
+     sess = requests.session()
+     DOMAIN = "https://vnshortener.com/"
+     org = "https://nishankhatri.xyz"
+     PhpAcc = DOMAIN + "link/new.php"
+     ref = "https://nishankhatri.com.np/"
+     go = DOMAIN + "links/go"
+
+     code = url.split("/")[3]
+     final_url = f"{DOMAIN}/{code}/"
+     headers = {'authority': DOMAIN, 'origin': org}
+
+     data = {'step_1': code}
+     response = sess.post(PhpAcc, headers=headers, data=data).json()
+     id = response["inserted_data"]["id"]
+     data = {'step_2': code, 'id': id}
+     response = sess.post(PhpAcc, headers=headers, data=data).json()
+
+     headers['referer'] = ref
+     params = {'sid': str(id)}
+     resp = sess.get(final_url, params=params, headers=headers)
+     soup = BeautifulSoup(resp.content, "html.parser")
+     inputs = soup.find_all("input")
+     data = {inp.get('name'): inp.get('value') for inp in inputs}
+
+     time.sleep(1)
+     headers['x-requested-with'] = 'XMLHttpRequest'
+     try:
+         r = sess.post(go, data=data, headers=headers).json()
+         if r["status"] == "success": return r["url"]
+         else: raise Exception(r)
+     except: return "Something went wrong :("
+
+ def rslinks(url):
+     download = rget(url, stream=True, allow_redirects=False)
+     v = download.headers["location"]
+     code = v.split('ms9')[-1]
+     return f"http://techyproio.blogspot.com/p/short.html?{code}=="
+
+ def du_link(url):
+     client = cloudscraper.create_scraper(allow_brotli=False)
+     DOMAIN = "https://du-link.in"
+     url = url[:-1] if url[-1] == '/' else url
+     code = url.split("/")[-1]
+     final_url = f"{DOMAIN}/{code}"
+     ref = "https://profitshort.com/"
+     h = {"referer": ref}
+     resp = client.get(final_url, headers=h, allow_redirects=False)
+     soup = BeautifulSoup(resp.content, "html.parser")
+     inputs = soup.find_all("input")
+     data = {inp.get('name'): inp.get('value') for inp in inputs}
+     h = {"x-requested-with": "XMLHttpRequest"}
+     time.sleep(0)
+     r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
+     try: return r.json()['url']
+     except: return "Something went wrong :("
+
+ def atozcartoonist(url):
+     loc = requests.get(url, allow_redirects=False).headers['Location']  # was named `re`, shadowing the regex module
+     print(loc)
+     link = loc
+     if 'moonlinks' in loc:
+         # transcript() is a coroutine; this assumes no event loop is already running here
+         from asyncio import run as arun
+         link = arun(transcript(loc, "https://go.moonlinks.in/", "https://www.akcartoons.in/", 7))
+         link = f'https://drive.google.com/uc?id={link.split("=")[-1]}&export=download'  # the f-prefix was missing
+     elif 'xpshort' in loc:
+         link = loc
+     return link
+
+ #####################################################################################################
+ # bitly + tinyurl
+
+ def bitly_tinyurl(url: str) -> str:
+     try: return requests.get(url).url
+     except: return "Something went wrong :("
+
+ #####################################################################################################
+ # thinfi
+
+ def thinfi(url: str) -> str:
+     try:
+         response = requests.get(url)
+         return BeautifulSoup(response.content, "html.parser").p.a.get("href")
+     except: return "Something went wrong :("
+
+ #####################################################################################################
+ # helpers
+
+ # check if present in list
+ def ispresent(inlist, url):
+     for ele in inlist:
+         if ele in url:
+             return True
+     return False
+
+
+ async def transcript(url: str, DOMAIN: str, ref: str, sltime) -> str:
+     code = url.rstrip("/").split("/")[-1]
+     cget = cloudscraper.create_scraper(allow_brotli=False).request
+     resp = cget("GET", f"{DOMAIN}/{code}", headers={"referer": ref}, allow_redirects=False)
+     soup = BeautifulSoup(resp.content, "html.parser")
+     data = {inp.get('name'): inp.get('value') for inp in soup.find_all("input")}
+     await asleep(sltime)
+     resp = cget("POST", f"{DOMAIN}/links/go", data=data, headers={"x-requested-with": "XMLHttpRequest"})
+     try:
+         return resp.json()['url']
+     except:
+         return "Something went wrong :("
955
+
956
+
957
+ def get_gdriveid(link):
958
+ if "folders" in link or "file" in link:
959
+ res = search(r"https:\/\/drive\.google\.com\/(?:drive(.*?)\/folders\/|file(.*?)?\/d\/)([-\w]+)", link)
960
+ return res.group(3)
961
+ parsed = urlparse(link)
962
+ return parse_qs(parsed.query)['id'][0]
963
+ def get_dl(link):
964
+ return f"https://indexlink.mrprincebotz.workers.dev/direct.aspx?id={get_gdriveid(link)}"
965
+
966
+
967
+ def drivescript(url, crypt, dtype):
968
+ rs = Session()
969
+ resp = rs.get(url)
970
+ title = findall(r'>(.*?)<\/h4>', resp.text)[0]
971
+ size = findall(r'>(.*?)<\/td>', resp.text)[1]
972
+ p_url = urlparse(url)
973
+
974
+ dlink = ''
975
+ if dtype != "DriveFire":
976
+ try:
977
+ js_query = rs.post(f"{p_url.scheme}://{p_url.hostname}/ajax.php?ajax=direct-download", data={'id': str(url.split('/')[-1])}, headers={'x-requested-with': 'XMLHttpRequest'}).json()
978
+ if str(js_query['code']) == '200':
979
+ dlink = f"{p_url.scheme}://{p_url.hostname}{js_query['file']}"
980
+ except Exception as e:
981
+ LOGGER.error(e)
982
+
983
+ if not dlink and crypt:
984
+ rs.get(url, cookies={'crypt': crypt})
985
+ try:
986
+ js_query = rs.post(f"{p_url.scheme}://{p_url.hostname}/ajax.php?ajax=download", data={'id': str(url.split('/')[-1])}, headers={'x-requested-with': 'XMLHttpRequest'}).json()
987
+ except Exception as e:
988
+ return(f'{e.__class__.__name__}')
989
+ if str(js_query['code']) == '200':
990
+ dlink = f"{p_url.scheme}://{p_url.hostname}{js_query['file']}"
991
+
992
+ if dlink:
993
+ res = rs.get(dlink)
994
+ soup = BeautifulSoup(res.text, 'html.parser')
995
+ gd_data = soup.select('a[class="btn btn-primary btn-user"]')
996
+ parse_txt = f'''┎ <b>Name :</b> <code>{title}</code>
997
+ ┠ <b>Size :</b> <code>{size}</code>
998
+
999
+ ┠ <b>{dtype} Link :</b> {url}'''
1000
+ if dtype == "HubDrive":
1001
+ parse_txt += f'''\n┠ <b>Instant Link :</b> <a href="{gd_data[1]['href']}">Click Here</a>'''
1002
+ if (d_link := gd_data[0]['href']):
1003
+ parse_txt += f"\n┠ <b>Index Link :</b> {get_dl(d_link)}"
1004
+ parse_txt += f"\n┖ <b>Drive Link :</b> {d_link}"
1005
+ return parse_txt
1006
+ elif not dlink and not crypt:
1007
+ return f'{dtype} Crypt Not Provided and Direct Link Generation Failed'
1008
+ else:
1009
+ return f'{js_query["file"]}'
1010
+
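+ # Typical call, mirroring the dispatcher below: drivescript(url, HCRYPT, "HubDrive")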
1011
+ ######################################################################################################
1012
+ #Scrapers
1013
+ def htpmovies(link):
1014
+ client = cloudscraper.create_scraper(allow_brotli=False)
1015
+ r = client.get(link, allow_redirects=True).text
1016
+ j = r.split('("')[-1]
1017
+ url = j.split('")')[0]
1018
+ param = url.split("/")[-1]
1019
+ DOMAIN = "https://go.theforyou.in"
1020
+ final_url = f"{DOMAIN}/{param}"
1021
+ resp = client.get(final_url)
1022
+ soup = BeautifulSoup(resp.content, "html.parser")
1023
+ try: inputs = soup.find(id="go-link").find_all(name="input")
1024
+ except: return "Incorrect Link"
1025
+ data = { inp.get('name'): inp.get('value') for inp in inputs }
1026
+ h = { "x-requested-with": "XMLHttpRequest" }
1027
+ time.sleep(10)
1028
+ r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1029
+ try:
1030
+ return r.json()['url']
1031
+ except: return "Something went Wrong !!"
1032
+
1033
+
1034
+ async def scrappers(link):  # async: several branches await transcript()/asleep()
1035
+
1036
+ try: link = rematch(r"(?i)^\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))", link)[0]  # inline flags must lead the pattern on Python >= 3.11
1037
+ except TypeError: return 'Not a Valid Link.'
1038
+ links = []
1039
+
1040
+ if "sharespark" in link:
1041
+ gd_txt = ""
1042
+ res = rget("?action=printpage;".join(link.split('?')))
1043
+ soup = BeautifulSoup(res.text, 'html.parser')
1044
+ for br in soup.findAll('br'):
1045
+ next_s = br.nextSibling
1046
+ if not (next_s and isinstance(next_s,NavigableString)):
1047
+ continue
1048
+ next2_s = next_s.nextSibling
1049
+ if next2_s and isinstance(next2_s,Tag) and next2_s.name == 'br':
1050
+ if str(next_s).strip():
1051
+ List = next_s.split()
1052
+ if rematch(r'^(480p|720p|1080p)(.+)? Links:\Z', next_s):
1053
+ gd_txt += f'<b>{next_s.replace("Links:", "GDToT Links :")}</b>\n\n'
1054
+ for s in List:
1055
+ ns = resub(r'\(|\)', '', s)
1056
+ if rematch(r'https?://.+\.gdtot\.\S+', ns):
1057
+ r = rget(ns)
1058
+ soup = BeautifulSoup(r.content, "html.parser")
1059
+ title = soup.select('meta[property^="og:description"]')
1060
+ gd_txt += f"<code>{(title[0]['content']).replace('Download ' , '')}</code>\n{ns}\n\n"
1061
+ elif rematch(r'https?://pastetot\.\S+', ns):
1062
+ nxt = resub(r'\(|\)|(https?://pastetot\.\S+)', '', next_s)
1063
+ gd_txt += f"\n<code>{nxt}</code>\n{ns}\n"
1064
+ return gd_txt
1065
+
1066
+ elif "htpmovies" in link and "/exit.php" in link:
1067
+ return htpmovies(link)
1068
+
1069
+ elif "htpmovies" in link:
1070
+ prsd = ""
1071
+ links = []
1072
+ res = rget(link)
1073
+ soup = BeautifulSoup(res.text, 'html.parser')
1074
+ x = soup.select('a[href^="/exit.php?url="]')
1075
+ y = soup.select('h5')
1076
+ z = unquote(link.split('/')[-2]).split('-')[0] if link.endswith('/') else unquote(link.split('/')[-1]).split('-')[0]
1077
+
1078
+ for a in x:
1079
+ links.append(a['href'])
1080
+ prsd = f"Total Links Found : {len(links)}\n\n"
1081
+
1082
+ msdcnt = -1
1083
+ for b in y:
1084
+ if str(b.string).lower().startswith(z.lower()):
1085
+ msdcnt += 1
1086
+ url = f"https://htpmovies.lol"+links[msdcnt]
1087
+ prsd += f"{msdcnt+1}. <b>{b.string}</b>\n{htpmovies(url)}\n\n"
1088
+ await asleep(5)
1089
+ return prsd
1090
+
1091
+ elif "cinevood" in link:
1092
+ res=requests.get(link)
1093
+ soup=BeautifulSoup(res.content,'html.parser')
1094
+ l=''
1095
+ ll=[]
1096
+ for j in soup.find_all('h6'):
1097
+ ll.append(j.text)
1098
+ ld=[]
1099
+ for i in soup.find_all('div',{'class':"cat-b"}):
1100
+ ld.append(f'<a href="{i.a["href"]}">➥{i.a.button.text}</a> |')
1101
+ a=0
1102
+ for i in ll:
1103
+ l+=f'{i}\n{ld[a]}{ld[a+1]}\n'
1104
+ a+=2
1105
+ return l
1106
+
1107
+ elif "atishmkv" in link:
1108
+ prsd = ""
1109
+ links = []
1110
+ res = rget(link)
1111
+ soup = BeautifulSoup(res.text, 'html.parser')
1112
+ x = soup.select('a[href^="https://gdflix"]')
1113
+ for a in x:
1114
+ links.append(a['href'])
1115
+ for o in links:
1116
+ prsd += o + '\n\n'
1117
+ return prsd
1118
+
1119
+ elif "teluguflix" in link:
1120
+ gd_txt = ""
1121
+ r = rget(link)
1122
+ soup = BeautifulSoup (r.text, "html.parser")
1123
+ links = soup.select('a[href*="gdtot"]')
1124
+ gd_txt = f"Total Links Found : {len(links)}\n\n"
1125
+ for no, link in enumerate(links, start=1):
1126
+ gdlk = link['href']
1127
+ t = rget(gdlk)
1128
+ soupt = BeautifulSoup(t.text, "html.parser")
1129
+ title = soupt.select('meta[property^="og:description"]')
1130
+ gd_txt += f"{no}. <code>{(title[0]['content']).replace('Download ' , '')}</code>\n{gdlk}\n\n"
1131
+ await asleep(1.5)
1132
+ return gd_txt
1133
+
1134
+ elif "taemovies" in link:
1135
+ gd_txt, no = "", 0
1136
+ r = rget(link)
1137
+ soup = BeautifulSoup (r.text, "html.parser")
1138
+ links = soup.select('a[href*="shortingly"]')
1139
+ gd_txt = f"Total Links Found : {len(links)}\n\n"
1140
+ for a in links:
1141
+ glink = await transcript(a["href"], "https://insurance.techymedies.com/", "https://highkeyfinance.com/", 5)
1142
+ t = rget(glink)
1143
+ soupt = BeautifulSoup(t.text, "html.parser")
1144
+ title = soupt.select('meta[property^="og:description"]')
1145
+ no += 1
1146
+ gd_txt += f"{no}. {(title[0]['content']).replace('Download ' , '')}\n{glink}\n\n"
1147
+ return gd_txt
1148
+
1149
+ elif "animeremux" in link:
1150
+ gd_txt, no = "", 0
1151
+ r = rget(link)
1152
+ soup = BeautifulSoup (r.text, "html.parser")
1153
+ links = soup.select('a[href*="urlshortx.com"]')
1154
+ gd_txt = f"Total Links Found : {len(links)}\n\n"
1155
+ for a in links:
1156
+ link = a["href"]
1157
+ x = link.split("url=")[-1]
1158
+ gd_txt+=f'➥ {x}\n'
1159
+ return gd_txt
1160
+
1161
+ elif "skymovieshd" in link:
1162
+ gd_txt = ""
1163
+ res = rget(link, allow_redirects=False)
1164
+ soup = BeautifulSoup(res.text, 'html.parser')
1165
+ a = soup.select('a[href^="https://howblogs.xyz"]')
1166
+ t = soup.select('div[class^="Robiul"]')
1167
+ gd_txt += f"<i>{t[-1].text.replace('Download ', '')}</i>\n\n"
1168
+ gd_txt += f"<b>{a[0].text} :</b> \n"
1169
+ nres = rget(a[0]['href'], allow_redirects=False)
1170
+ nsoup = BeautifulSoup(nres.text, 'html.parser')
1171
+ atag = nsoup.select('div[class="cotent-box"] > a[href]')
1172
+ for no, link in enumerate(atag, start=1):
1173
+ gd_txt += f"➥ {link['href']}\n"
1174
+ return gd_txt
1175
+
1176
+ elif "animekaizoku" in link:
1177
+ global post_id
1178
+ gd_txt = ""
1179
+ try: website_html = rget(link).text
1180
+ except: return "Please provide the correct episode link of animekaizoku"
1181
+ try:
1182
+ post_id = POST_ID_REGEX.search(website_html).group(0).split(":")[1].split('"')[1]
1183
+ payload_data_matches = DDL_REGEX.finditer(website_html)
1184
+ except: return "Something Went Wrong !!"
1185
+
1186
+ for match in payload_data_matches:
1187
+ payload_data = match.group(0).split("DDL(")[1].replace(")", "").split(",")
1188
+ payload = {
1189
+ "action" : "DDL",
1190
+ "post_id": post_id,
1191
+ "div_id" : payload_data[0].strip(),
1192
+ "tab_id" : payload_data[1].strip(),
1193
+ "num" : payload_data[2].strip(),
1194
+ "folder" : payload_data[3].strip(),
1195
+ }
1196
+ del payload["num"]
1197
+ link_types = "DDL" if payload["tab_id"] == "2" else "WORKER" if payload["tab_id"] == "4" else "GDRIVE"
1198
+ response = rpost("https://animekaizoku.com/wp-admin/admin-ajax.php",headers={"x-requested-with": "XMLHttpRequest", "referer": "https://animekaizoku.com"}, data=payload)
1199
+ soup = BeautifulSoup(response.text, "html.parser")
1200
+ downloadbutton = soup.find_all(class_="downloadbutton")
1201
+
1202
+ with concurrent.futures.ThreadPoolExecutor() as executor:
1203
+             for button in downloadbutton:
+                 if button.text == "Patches":
+                     continue
+                 dict_key = button.text.strip()
+                 data_dict[dict_key] = []
+                 executor.submit(looper, dict_key, str(button))
1209
+ main_dict[link_types] = deepcopy(data_dict)
1210
+ data_dict.clear()
1211
+
1212
+ to_edit = False
1213
+ for key in main_dict:
1214
+ gd_txt += f"----------------- <b>{key}</b> -----------------\n"
1215
+ dict_data = main_dict[key]
1216
+
1217
+ if not dict_data:
1218
+ gd_txt += "No Links Found\n"
1219
+ else:
1220
+ for y in dict_data:
1221
+ gd_txt += f"\n○ <b>{y}</b>\n"
1222
+ for no, i in enumerate(dict_data[y], start=1):
1223
+ try: gd_txt += f"➥ {no}. <i>{i[0]}</i> : {i[1]}\n"
1224
+ except: pass
1225
+ await asleep(5)
1226
+ return gd_txt
1227
+
1228
+ else:
1229
+ res = rget(link)
1230
+ soup = BeautifulSoup(res.text, 'html.parser')
1231
+ mystx = soup.select(r'a[href^="magnet:?xt=urn:btih:"]')
1232
+ for hy in mystx:
1233
+ links.append(hy['href'])
1234
+ return links
1235
+
1236
+
1237
+ ###################################################
1238
+ # script links
1239
+
1240
+ def getfinal(domain, url, sess):
1241
+
1242
1243
+ res = sess.get(url)
1244
+ soup = BeautifulSoup(res.text,"html.parser")
1245
+ soup = soup.find("form").findAll("input")
1246
+ datalist = []
1247
+ for ele in soup:
1248
+ datalist.append(ele.get("value"))
1249
+
1250
+ data = {
1251
+ '_method': datalist[0],
1252
+ '_csrfToken': datalist[1],
1253
+ 'ad_form_data': datalist[2],
1254
+ '_Token[fields]': datalist[3],
1255
+ '_Token[unlocked]': datalist[4],
1256
+ }
1257
+
1258
+ sess.headers = {
1259
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0',
1260
+ 'Accept': 'application/json, text/javascript, */*; q=0.01',
1261
+ 'Accept-Language': 'en-US,en;q=0.5',
1262
+ 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
1263
+ 'X-Requested-With': 'XMLHttpRequest',
1264
+ 'Origin': domain,
1265
+ 'Connection': 'keep-alive',
1266
+ 'Referer': url,
1267
+ 'Sec-Fetch-Dest': 'empty',
1268
+ 'Sec-Fetch-Mode': 'cors',
1269
+ 'Sec-Fetch-Site': 'same-origin',
1270
+ }
1271
+
1272
+ # print("waiting 10 secs")
1273
+ time.sleep(10) # important
1274
+ response = sess.post(domain.rstrip('/') + '/links/go', data=data).json()  # avoid a double slash when domain ends with /
1275
+ furl = response["url"]
1276
+ return furl
1277
+
1278
+
1279
+ def getfirst(url):
1280
+
1281
+ sess = requests.session()
1282
+ res = sess.get(url)
1283
+
1284
+ soup = BeautifulSoup(res.text,"html.parser")
1285
+ soup = soup.find("form")
1286
+ action = soup.get("action")
1287
+ soup = soup.findAll("input")
1288
+ datalist = []
1289
+ for ele in soup:
1290
+ datalist.append(ele.get("value"))
1291
+ sess.headers = {
1292
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0',
1293
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
1294
+ 'Accept-Language': 'en-US,en;q=0.5',
1295
+ 'Origin': action,
1296
+ 'Connection': 'keep-alive',
1297
+ 'Referer': action,
1298
+ 'Upgrade-Insecure-Requests': '1',
1299
+ 'Sec-Fetch-Dest': 'document',
1300
+ 'Sec-Fetch-Mode': 'navigate',
1301
+ 'Sec-Fetch-Site': 'same-origin',
1302
+ 'Sec-Fetch-User': '?1',
1303
+ }
1304
+
1305
+ data = {'newwpsafelink': datalist[1], "g-recaptcha-response": RecaptchaV3()}
1306
+ response = sess.post(action, data=data)
1307
+ soup = BeautifulSoup(response.text, "html.parser")
1308
+ soup = soup.findAll("div", class_="wpsafe-bottom text-center")
1309
+ for ele in soup:
1310
+ rurl = ele.find("a").get("onclick")[13:-12]
1311
+
1312
+ res = sess.get(rurl)
1313
+ furl = res.url
1314
+ # print(furl)
1315
+ return getfinal(f'https://{furl.split("/")[-2]}/',furl,sess)
1316
+
1317
+ def decodeKey(encoded):
1318
+ key = ''
1319
+
1320
+ i = len(encoded) // 2 - 5
1321
+ while i >= 0:
1322
+ key += encoded[i]
1323
+ i = i - 2
1324
+
1325
+ i = len(encoded) // 2 + 4
1326
+ while i < len(encoded):
1327
+ key += encoded[i]
1328
+ i = i + 2
1329
+
1330
+ return key
1331
+
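+ # decodeKey un-shuffles the obfuscated key that Create_Button() embeds in the page:
+ # it reads every second character outwards from the middle, first at indices
+ # len//2-5, len//2-7, ... down to 0, then len//2+4, len//2+6, ... to the end.
+ # e.g. decodeKey("abcdefghijklmnopqrst") -> "fdboqs" (indices 5,3,1 then 14,16,18)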
1332
+ def bypassBluemediafiles(url, torrent=False):
1333
+ headers = {
1334
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0',
1335
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
1336
+ 'Accept-Language': 'en-US,en;q=0.5',
1337
+ 'Alt-Used': 'bluemediafiles.com',
1338
+ 'Connection': 'keep-alive',
1339
+ 'Upgrade-Insecure-Requests': '1',
1340
+ 'Sec-Fetch-Dest': 'document',
1341
+ 'Sec-Fetch-Mode': 'navigate',
1342
+ 'Sec-Fetch-Site': 'none',
1343
+ 'Sec-Fetch-User': '?1',
1344
+
1345
+ }
1346
+
1347
+ res = requests.get(url, headers=headers)
1348
+ soup = BeautifulSoup(res.text, 'html.parser')
1349
+ script = str(soup.findAll('script')[3])
1350
+ encodedKey = script.split('Create_Button("')[1].split('");')[0]
1351
+
1352
+
1353
+ headers = {
1354
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0',
1355
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
1356
+ 'Accept-Language': 'en-US,en;q=0.5',
1357
+ 'Referer': url,
1358
+ 'Alt-Used': 'bluemediafiles.com',
1359
+ 'Connection': 'keep-alive',
1360
+ 'Upgrade-Insecure-Requests': '1',
1361
+ 'Sec-Fetch-Dest': 'document',
1362
+ 'Sec-Fetch-Mode': 'navigate',
1363
+ 'Sec-Fetch-Site': 'same-origin',
1364
+ 'Sec-Fetch-User': '?1',
1365
+ }
1366
+
1367
+ params = { 'url': decodeKey(encodedKey) }
1368
+
1369
+ if torrent:
1370
+ res = requests.get('https://dl.pcgamestorrents.org/get-url.php', params=params, headers=headers)
1371
+ soup = BeautifulSoup(res.text,"html.parser")
1372
+ furl = soup.find("a",class_="button").get("href")
1373
+
1374
+ else:
1375
+ res = requests.get('https://bluemediafiles.com/get-url.php', params=params, headers=headers)
1376
+ furl = res.url
1377
+ if "mega.nz" in furl:
1378
+ furl = furl.replace("mega.nz/%23!","mega.nz/file/").replace("!","#")
1379
+
1380
+ #print(furl)
1381
+ return furl
1382
+
1383
+ def igggames(url):
1384
+ res = requests.get(url)
1385
+ soup = BeautifulSoup(res.text,"html.parser")
1386
+ soup = soup.find("div",class_="uk-margin-medium-top").findAll("a")
1387
+
1388
+ bluelist = []
1389
+ for ele in soup:
1390
+ bluelist.append(ele.get('href'))
1391
+ bluelist = bluelist[6:-1]
1392
+
1393
+ links = ""
1394
+ for ele in bluelist:
1395
+ if "bluemediafiles" in ele:
1396
+ links = links + bypassBluemediafiles(ele) + "\n"
1397
+ elif "pcgamestorrents.com" in ele:
1398
+ res = requests.get(ele)
1399
+ soup = BeautifulSoup(res.text,"html.parser")
1400
+ turl = soup.find("p",class_="uk-card uk-card-body uk-card-default uk-card-hover").find("a").get("href")
1401
+ links = links + bypassBluemediafiles(turl,True) + "\n"
1402
+ else:
1403
+ links = links + ele + "\n"
1404
+
1405
+ return links[:-1]
1406
+ def try2link_bypass(url):
1407
+ client = cloudscraper.create_scraper(allow_brotli=False)
1408
+
1409
+ url = url[:-1] if url[-1] == '/' else url
1410
+ print(f'try2link_bypass {url}')
1411
+ params = (('d', int(time.time()) + (60 * 4)),)
1412
+ r = client.get(url, params=params, headers= {'Referer': 'https://newforex.online/'})
1413
+
1414
+ soup = BeautifulSoup(r.text, 'html.parser')
1415
+ inputs = soup.find(id="go-link").find_all(name="input")
1416
+ data = { inp.get('name'): inp.get('value') for inp in inputs }
1417
+ time.sleep(7)
1418
+
1419
+ headers = {'Host': 'try2link.com', 'X-Requested-With': 'XMLHttpRequest', 'Origin': 'https://try2link.com', 'Referer': url}
1420
+
1421
+ bypassed_url = client.post('https://try2link.com/links/go', headers=headers,data=data)
1422
+ return bypassed_url.json()["url"]
1423
+
1424
+ def try2link_scrape(url):
1425
+ client = cloudscraper.create_scraper(allow_brotli=False)
1426
+ h = {
1427
+ 'upgrade-insecure-requests': '1', 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
1428
+ }
1429
+ res = client.get(url, cookies={}, headers=h)
1430
+ print(res.headers)
1431
+ url = 'https://try2link.com/' + re.findall(r'try2link\.com/(.*?) ', res.text)[0]
1432
+ print(url)
1433
+ return try2link_bypass(url)
1434
+
1435
+
1436
+ def psa_bypasser(psa_url):
1437
+ # NOTE: a hardcoded cf_clearance cookie goes stale quickly; refresh it from a live browser session
+ cookies = {'cf_clearance': 'EgNaZUZVvICwi_V.34D6bTmYzyp24zoY_SFrC2vqm7U-1694540798-0-1-530db2b8.dee7f907.c12667d1-0.2.1694540798' }
1438
+ headers = {
1439
+ 'authority': 'psa.wf',
1440
+ 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
1441
+ 'accept-language': 'en-US,en;q=0.9',
1442
+ 'referer': 'https://psa.wf/',
1443
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
1444
+ }
1445
+
1446
+ r = requests.get(psa_url, headers=headers, cookies=cookies)
1447
+ soup = BeautifulSoup(r.text, "html.parser").find_all(class_="dropshadowboxes-drop-shadow dropshadowboxes-rounded-corners dropshadowboxes-inside-and-outside-shadow dropshadowboxes-lifted-both dropshadowboxes-effect-default")
1448
+ links = []
1449
+ for link in soup:
1450
+ try:
1451
+ exit_gate = link.a.get("href")
1452
+ if "/exit" in exit_gate:
1453
+ print("scraping :",exit_gate)
1454
+ links.append(try2link_scrape(exit_gate))
1455
+ except: pass
1456
+ return links
1457
+ async def themoviesboss(url):
+     script = requests.get(url).text
+     match = re.search(r'window\.location\.href="(.*?)"', script)
+     if match:
+         url = match.group(1)
+     if url == 'https://themoviesboss.site':
+         return 'File not Found'
+     # chain into the generic shortener resolver for the unlocked link
+     return f'link: {url}\nBypass link: {await shortners(url)}'
1466
+ def moviesboss(url):
+     page = requests.get(url)
+     soup = BeautifulSoup(page.content, 'html.parser')
+ 
+     def boss(u):
+         # follow the client-side redirect embedded in the page script
+         script = requests.get(u).text
+         match = re.search(r'window\.location\.href="(.*?)"', script)
+         return match.group(1) if match else u
+ 
+     out = ''
+     for p, q in zip(soup.find_all('p', {'style': "text-align: center;"}),
+                     soup.find_all('a', class_='maxbutton-2 maxbutton')):
+         label = p.strong.text.strip().split('\n')[0]
+         out += f"➥<a href='{boss(q['href'])}'>{label}</a> |\n"
+     return out
1484
+ def tenbit(url):
1485
+ res=requests.get(url)
1486
+ soup=BeautifulSoup(res.content,'html.parser')
1487
+ l=''
1488
+ # for i in soup.find_all('span',{'style':"color: #ffffff;"}):
1489
+ # l+=f'{i.text}'
1490
+ l+='\nLinks: '
1491
+ for i in soup.find_all('a',{'class':'mb-button'}):
1492
+ l+=f'<a href="{i.get("href")}">➥{str(i.text).lstrip()}</a> |'
1493
+ return l
1494
+ def animepahe(link):
1495
+ client = cloudscraper.create_scraper()  # create_scraper() is the standard cloudscraper constructor
1496
+ res = client.get(link)
1497
+ soup = BeautifulSoup(res.text, "html.parser")
1498
+ l = soup.find_all("div", id="pickDownload")
1499
+ l2 = []
1500
+ l3 = []
1501
+ s = f"{soup.find('title').text}\n"
1502
+ print(s)
1503
+ for i in l:
1504
+ for j in i.find_all("a"):
1505
+ l2.append(str(j.text).replace("SubsPlease · ",""))
1506
+ res1 = client.get(j.get('href')).text
1507
+ soup1 = BeautifulSoup(res1, "html.parser")
1508
+ l3.append(set_direct_link(soup1.find("a", class_="redirect").get('href')))
1509
+ for a,b in zip(l2,l3):
1510
+ s += f'<a href="{b}">➳ {a}| </a>'
1511
+ return s
1512
+ async def atishmkv(url):
1513
+ client = cloudscraper.create_scraper(allow_brotli=False)
1514
+ res=client.get(url)
1515
+ soup=BeautifulSoup(res.content,'html.parser')
1516
+ l=f'Title:{soup.find("title").text}\n'
1517
+ for i in soup.find_all('a', class_="button button-shadow"):
1518
+ l+=f'➥<a href="{i.get("href")}">{str(i.get_text()).lstrip()}</a> |\n'
1519
+ return l
1520
+ async def telegraph_scraper(url):
+     res = requests.get(url)
+     soup = BeautifulSoup(res.content, 'html.parser')
+     out = 'Scraped Links \n'
+     for i, j in zip(soup.find_all('strong'), soup.find_all('code')):
+         out += f'➥<a href="{i.a.get("href")}">{str(j.get_text()).lstrip()}</a> |\n'
+     return out
1527
+
1528
+ def atoz(url):
+     res = requests.get(url)
+     soup = BeautifulSoup(res.content, 'html.parser')
+     out = f'Title: {soup.find("title")}\n'
+     try:
+         for i in soup.select('div[class*="mks_accordion_item"]'):
+             out += f'➥<a href="https://www.atozcartoonist.com{i.a["href"]}">{str(i.text).lstrip()}</a>\n'
+     except Exception:
+         pass
+     return out
1538
+ async def toonworld4all(url: str):
1539
+ if "/redirect/main.php?url=" in url:
1540
+ return f'┎ <b>Source Link:</b> {url}\n┃\n┖ <b>Bypass Link:</b> {rget(url).url}'
1541
+ xml = rget(url).text
1542
+ soup = BeautifulSoup(xml, 'html.parser')
1543
+ if '/episode/' not in url:
1544
+ epl = soup.select('a[href*="/episode/"]')
1545
+ tls = soup.select('div[class*="mks_accordion_heading"]')
1546
+ stitle = search(r'\"name\":\"(.+)\"', xml).group(1).split('"')[0]
1547
+ prsd = f'<b><i>{stitle}</i></b>'
1548
+ for n, (t, l) in enumerate(zip(tls, epl), start=1):
1549
+ prsd += f'''
1550
+
1551
+ {n}. <i><b>{t.strong.string}</b></i>
1552
+ ┖ <b>Link :</b> {l["href"]}'''
1553
+ return prsd
1554
+ links = soup.select('a[href*="/redirect/main.php?url="]')
1555
+ titles = soup.select('h5')
1556
+ prsd = f"<b><i>{titles[0].string}</i></b>"
1557
+ titles.pop(0)
1558
+ slicer, _ = divmod(len(links), len(titles))
1559
+ atasks = []
1560
+ for sl in links:
1561
+ nsl = ""
1562
+ while all(x not in nsl for x in ['rocklinks', 'link1s']):
1563
+ nsl = rget(sl["href"], allow_redirects=False).headers['location']
1564
+ print(nsl)
1565
+ if "rocklinks" in nsl:
1566
+ atasks.append(create_task(transcript(nsl, "https://insurance.techymedies.com/", "https://highkeyfinance.com/", 5)))
1567
+ elif "link1s" in nsl:
1568
+ atasks.append(create_task(transcript(nsl, "https://link1s.com", "https://anhdep24.com/", 9)))
1569
+
1570
+ com_tasks = await gather(*atasks, return_exceptions=True)
1571
+ lstd = [com_tasks[i:i+slicer] for i in range(0, len(com_tasks), slicer)]
1572
+
1573
+ for no, tl in enumerate(titles):
1574
+ prsd += f"\n\n<b>{tl.string}</b>\n┃\n┖ <b>Links :</b> "
1575
+ for tl, sl in zip(links, lstd[no]):
1576
+ if isinstance(sl, Exception):
1577
+ prsd += str(sl)
1578
+ else:
1579
+ prsd += f"<a href='{sl}'>{tl.string}</a>, "
1580
+ prsd = prsd[:-2]
1581
+ return prsd
1582
+ async def toonhub_scrapper(url):
1583
+ client = cloudscraper.create_scraper(allow_brotli=False)
1584
+ if 'redirect/?url' in url:
1585
+ res=client.get(url,allow_redirects=False).headers['Location']
1586
+ return await shortners(res)
1587
+ res = client.get(url)
1588
+ soup = BeautifulSoup(res.content, 'html.parser')
1589
+ if '/episode/' not in url:
1590
+ l = f'{soup.find("title").text}\n'
1591
+ for i, j in zip(soup.find_all('div', {'class': "three_fourth tie-columns last"}),
1592
+ soup.find_all('div', {'class': 'toggle'})):
1593
+ l += f'<a href="{j.a.get("href")}">{str(j.h3.text).lstrip()}\n</a>Context: {i.text}\n'
1594
+ return l
1595
+
1596
+ links = soup.select('a[href*="/redirect/?url="]')
1597
+ titles = soup.select('h5')
1598
+ prsd = f"<b><i>{titles[0].string}</i></b>"
1599
+ titles.pop(0)
1600
+ slicer, _ = divmod(len(links), len(titles))
1601
+ atasks = []
1602
+ for sl in links:
1603
+ nsl = client.get(f'https://toonshub.link/{sl["href"]}', allow_redirects=False).headers['location']
1604
+ print(nsl)
1605
+ atasks.append(create_task(shortners(nsl)))
1606
+ com_tasks = await gather(*atasks, return_exceptions=True)
1607
+ lstd = [com_tasks[i:i+slicer] for i in range(0, len(com_tasks), slicer)]
1608
+
1609
+ for no, tl in enumerate(titles):
1610
+ prsd += f"\n\n<b>{tl.string}</b>\n┃\n┖ <b>Links :</b> "
1611
+ for tl, sl in zip(links, lstd[no]):
1612
+ if isinstance(sl, Exception):
1613
+ prsd += str(sl)
1614
+ else:
1615
+ prsd += f"<a href='{sl}'>{tl.string}</a>, "
1616
+ prsd = prsd[:-2]
1617
+ return prsd
1618
+
1619
+ async def dhakrey(url):
1620
+ cget = cloudscraper.create_scraper(allow_brotli=False).request
1621
+ resp = cget("GET", url)
1622
+ soup=BeautifulSoup(resp.text,'html.parser')
1623
+ title=f'\nTitle: {soup.find("title").text}\n\n'
1624
+ for button in soup.find_all('button', onclick=True):
1625
+ onclick_value = button['onclick']
1626
+ match = re.search(r"window\.open\(['\"](https://drive.daddyop.us/dl[^'\"]+)['\"].*?\)", onclick_value)
1627
+ download=re.search(r"window\.open\(['\"](https://[^'\"]*download\.aspx[^'\"]*)['\"].*?\)", onclick_value)
1628
+ if match:
1629
+ https_link = match.group(1)
1630
+ button_text = button.get_text().strip()
1631
+ if button_text=='Direct Drive Link':
1632
+ soup1=BeautifulSoup(cget('GET',https_link).content,'html.parser')
1633
+ drive_links = soup1.select('a[href^="https://drive.google.com"]')
1634
+ filepress=soup1.select('a[href^="https://new.filepress.store"]')
1635
+ for link,flink in zip(drive_links,filepress):
1636
+ title+=f'➥<a href="{link["href"]}">{str("Drive Link").lstrip()}</a> | '
1637
+ title+=f'➥<a href="{flink["href"]}">{str("Filepress Link").lstrip()}</a> | '
1638
+ if download:
1639
+ https_link = download.group(1)
1640
+ button_text = button.get_text().strip()
1641
+ title+=f'➥<a href="{https_link}">{str(button_text).lstrip()}</a> |\n'
1642
+
1643
+ return title
1644
+ def publicearn(url, uid):
+     chromedriver_autoinstaller.install()
+     chrome_options = webdriver.ChromeOptions()
+     chrome_options.add_argument("--no-sandbox")
+     chrome_options.add_argument("--headless")
+     chrome_options.add_argument("--disable-dev-shm-usage")
+     driver = webdriver.Chrome(options=chrome_options)
+     try:
+         driver.get(url)
+         # give the shortener's countdown/redirect chain time to settle
+         for i in range(31):
+             time.sleep(1)
+             print(i)
+         code = url.split('/')[-1]
+         ref = driver.current_url.split('//')[-1].split('/')[0]
+         print(ref)
+     finally:
+         driver.quit()  # always release the browser, even on failure
+     cget = cloudscraper.create_scraper(allow_brotli=False).request
+     resp = cget("GET", f"https://go.publicearn.com/{code}/?uid={uid}", headers={"referer": f'https://{ref}/'})
+     soup = BeautifulSoup(resp.content, "html.parser")
+     data = { inp.get('name'): inp.get('value') for inp in soup.find_all("input") }
+     print(data)
+     resp = cget("POST", "https://go.publicearn.com/links/go", data=data, headers={"x-requested-with": "XMLHttpRequest"})
+     try:
+         return resp.json()['url']
+     except Exception as e:
+         print(e)
+         return "Something went wrong :("
1668
+
1669
+
1670
+ # shortners
1671
+ async def shortners(url):
1672
+ if "https://igg-games.com/" in url:
1673
+ print("entered igg:",url)
1674
+ return igggames(url)
1675
+ elif "https://katdrive." in url:
1676
+ if KATCRYPT == "":
1677
+ return "🚫 __You can't use this because__ **KATDRIVE_CRYPT** __ENV is not set__"
1678
+
1679
+ print("entered katdrive:",url)
1680
+ return drivescript(url, KATCRYPT, "KatDrive")
1681
+ elif "https://kolop." in url:
1682
+ if KCRYPT == "":
1683
+ return "🚫 __You can't use this because__ **KOLOP_CRYPT** __ENV is not set__"
1684
+
1685
+ print("entered kolop:",url)
1686
+ return kolop_dl(url, KCRYPT)
1687
+
1688
+ # hubdrive
1689
+ elif "https://hubdrive." in url:
1690
+ if HCRYPT == "":
1691
+ return "🚫 __You can't use this because__ **HUBDRIVE_CRYPT** __ENV is not set__"
1692
+
1693
+ print("entered hubdrive:",url)
1694
+ return drivescript(url, HCRYPT, "HubDrive")
1695
+
1696
+ # drivefire
1697
+ elif "https://drivefire." in url:
1698
+ if DCRYPT == "":
1699
+ return "🚫 __You can't use this because__ **DRIVEFIRE_CRYPT** __ENV is not set__"
1700
+
1701
+ print("entered drivefire:",url)
1702
+ return drivefire_dl(url, DCRYPT)
1703
+
1704
+ # filecrypt
1705
+ elif (("https://filecrypt.co/") in url or ("https://filecrypt.cc/" in url)):
1706
+ print("entered filecrypt:",url)
1707
+ return filecrypt(url)
1708
+
1709
+ # shareus
1710
+ elif "https://shareus." in url or "https://shrs.link/" in url:
1711
+ print("entered shareus:",url)
1712
+ return shareus(url)
1713
+
1714
+
1715
+
1716
+ elif "https://shorte.st/" in url:
1717
+ print("entered shorte:",url)
1718
+ return sh_st_bypass(url)
1719
+
1720
+ elif "https://psa.wf/exit" in url:
1721
+ print("enterezbd psa:",url)
1722
+ return try2link_scrape(url)
1723
+ # psa
1724
+ elif "https://psa.wf/" in url:
1725
+ print("entered pssfdgsga:",url)
1726
+ return psa_bypasser(url)
1727
+
1728
+ # sharer pw
1729
+ elif "https://sharer.pw/" in url:
1730
+ if XSRF_TOKEN == "" or Laravel_Session == "":
1731
+ return "🚫 __You can't use this because__ **XSRF_TOKEN** __and__ **Laravel_Session** __ENV is not set__"
1732
+
1733
+ print("entered sharer:",url)
1734
+ return sharer_pw(url, Laravel_Session, XSRF_TOKEN)
1735
+
1736
+ # gdtot url
1737
+ elif "gdtot.cfd" in url:
1738
+ print("entered gdtot:",url)
1739
+ return f"<b><i>Can't bypass Now.They add cloudflare protection.</i></b>"
1740
+ elif 'dhakrey' in url:
1741
+ return await dhakrey(url)
1742
+ # adfly
1743
+ elif "https://adf.ly/" in url:
1744
+ print("entered adfly:",url)
1745
+ out = adfly(url)
1746
+ return out['bypassed_url']
1747
+ # droplink
1748
+ elif "https://droplink.co/" in url:
1749
+ print("entered droplink:",url)
1750
+ return droplink(url)
1751
+
1752
+ # linkvertise
1753
+ elif "https://linkvertise.com/" in url:
1754
+ print("entered linkvertise:",url)
1755
+ return linkvertise(url)
1756
+
1757
+
1758
+ # ouo
1759
+ elif "https://ouo.press/" in url or "https://ouo.io/" in url:
1760
+ print("entered ouo:",url)
1761
+ return ouo(url)
1762
+
1763
+ # try2link
1764
+ elif "https://try2link.com/" in url:
1765
+ print("entered try2links:",url)
1766
+ return try2link_bypass(url)
1767
+
1768
+
1769
+
1770
+ # rslinks
1771
+ elif "rslinks.net" in url:
1772
+ print("entered rslinks:",url)
1773
+ return rslinks(url)
1774
+
1775
+ # bitly + tinyurl
1776
+ elif "bit.ly" in url or "tinyurl.com" in url:
1777
+ print("entered bitly_tinyurl:",url)
1778
+ return bitly_tinyurl(url)
1779
+
1780
+ # thinfi
1781
+ elif "thinfi.com" in url:
1782
+ print("entered thinfi:",url)
1783
+ return thinfi(url)
1784
+
1785
+ # htpmovies sharespark cinevood
1786
+ elif "https://htpmovies." in url or 'sharespark' in url or "https://skymovieshd" in url \
1787
+ or "https://teluguflix" in url or 'https://taemovies' in url or "https://animeremux" in url or 'https://cinevood.' in url or 'https://animeremux.' in url:
1788
+ print("entered htpmovies sharespark cinevood skymovieshd :",url)
1789
+ return await scrappers(url)
1790
+
1791
+ # gdrive look alike
1792
+ elif ispresent(gdlist,url):
1793
+ print("entered gdrive look alike:",url)
1794
+ return unified(url)
1795
+
1796
+ # others
1797
+ elif ispresent(otherslist,url):
1798
+ print("entered others:",url)
1799
+ return others(url)
1800
+
1801
+ elif "toonworld4all.me/redirect/main.php?" in url:
1802
+ nsl=''
1803
+ while not any(x in nsl for x in ['rocklinks', 'link1s']):
1804
+ nsl=rget(url,allow_redirects=False).headers['location']
1805
+ if 'go.rocklinks' in nsl:
1806
+ as1=await transcript(nsl, "https://insurance.techymedies.com/", "https://highkeyfinance.com/", 5)
1807
+ else:
1808
+ as1=await transcript(nsl,"https://link1s.com/","https://anhdep24.com/",8)
1809
+ return as1
1810
+ #Toonworld4all
1811
+ elif "toonworld4all" in url:
1812
+ print("entered toonworld4all:",url)
1813
+ return await toonworld4all(url)
1814
+ elif "toonshub" in url:
1815
+ return await toonhub_scrapper(url)
1816
+
1817
+ # elif "drive.google.com/" in url:
1818
+ # if 'view' in url:
1819
+ # d=url.index('view')
1820
+ # url=url[0:66].split('/')
1821
+ # url=url[-2]
1822
+ # return f'🔗Link: <a href="https://indexlink.mrprincebotz.workers.dev/direct.aspx?id={url}">ɪɴᴅᴇx ʟɪɴᴋ</a>'
1823
+ # elif 'id' in url:
1824
+ # try:
1825
+ # ur=url.index('&export=download')
1826
+ # url=url[0:64].split('=')
1827
+ # url=url[-1]
1828
+ # return f'🔗Link: <a href="https://indexlink.mrprincebotz.workers.dev/direct.aspx?id={url}">ɪɴᴅᴇx ʟɪɴᴋ</a>'
1829
+ # except:
1830
+ # url=url[0:64].split('=')
1831
+ # url=url[-1]
1832
+ # return f'🔗Link: <a href="https://indexlink.mrprincebotz.workers.dev/direct.aspx?id={url}">ɪɴᴅᴇx ʟɪɴᴋ</a>'
1833
+ elif "drive.google.com/" in url:
1834
+ return f'🔗ɪɴᴅᴇx ʟɪɴᴋ: {get_dl(url)}'
1835
+ #vnshortener
1836
+ elif "vnshortener.com" in url:
1837
+ print("entered vnshortener:",url)
1838
+ return vnshortener(url)
1839
+ elif "themoviesboss.site/secret?" in url:
1840
+ print("entered themoviesboss:",url)
1841
+ return await themoviesboss(url)
1842
+ elif "themoviesboss.site" in url:
1843
+ print("entered moviesboss:",url)
1844
+ return moviesboss(url)
1845
+ elif 'https://files.technicalatg.com/' in url:
1846
+ code=url.split('/')[-1]
1847
+ return f'https://atglinks.com/{code}'
1848
+ elif 'https://atglinks.com/' in url:
1849
+ return f"There's NO Bypass for atglinks.com Now "
1850
+ #tenbit
1851
+ elif "https://10bitclub.me" in url:
1852
+ print("entered 10bitclub:",url)
1853
+ return tenbit(url)
1854
+
1855
+ elif "https://animepahe.ru" in url:
1856
+ print("entered animepahe:",url)
1857
+ return animepahe(url)
1858
+
1859
+ #du_link
1860
+ elif "https://du-link.in" in url:
1861
+ print("entered du_link:",url)
1862
+ return du_link(url)
1863
+ #atozcartoonist
1864
+ elif "https://www.atozcartoonist.com/redirect/" in url:
1865
+ print("entered atozcartoonist:",url)
1866
+ return await atozcartoonist(url)
1867
+ elif "https://www.atozcartoonist.com/" in url:
1868
+ print("entered atoz:",url)
1869
+ return atoz(url)
1870
+ elif 'atishmkv.wiki' in url:
1871
+ print(f"entered atishmkv: {url}")
1872
+ return await atishmkv(url)
1873
+ #telegraph_scaper
1874
+ elif 'https://graph.org' in url:
1875
+ print(f"entered telegraph_scaper: {url}")
1876
+ return await telegraph_scaper(url)
1877
+
1878
+ elif "shrinkforearn" in url:
1879
+ return await transcript(url,"https://shrinkforearn.in/","https://wp.uploadfiles.in/", 10)
1880
+ elif "link.short2url" in url:
1881
+ return await transcript(url,"https://techyuth.xyz/blog/", "https://blog.mphealth.online/", 9)
1882
+ elif "viplinks" in url:
1883
+ return await transcript(url,"https://m.vip-link.net/", "https://m.leadcricket.com/", 5)
1884
+ elif "bindaaslinks" in url:
1885
+ return await transcript(url,"https://thebindaas.com/blog/", "https://finance.appsinsta.com/", 5)
1886
+ elif "sheralinks" in url:
1887
+ return await transcript(url,"https://link.blogyindia.com/", "https://blogyindia.com/", 5)
1888
+ elif "url4earn" in url:
1889
+ return await transcript(url,"https://go.url4earn.in/", "https://techminde.com/", 8)
1890
+ elif "tglink" in url:
1891
+ url=url.lower()
1892
+ return await transcript(url, "https://tglink.in/", "https://www.proappapk.com/", 5)
1893
+ elif "link1s.com" in url:
1894
+ return await transcript(url,"https://link1s.com/","https://anhdep24.com/",8)
1895
+ elif "gofile.io" in url:
1896
+ return gofile_dl(url)
1897
+ elif "publicearn" in url:
1898
+ return publicearn(url,uid)
1899
+ elif "links4money.com" in url:
1900
+ return await transcript(url,'https://links4money.com','https://gamergiri.infokeeda.xyz',2)
1901
+ elif "happiurl.com" in url:
1902
+ return await transcript(url,'https://count.financevis.com/','https://financevis.com/',5)
1903
+ elif "linkfly" in url:
1904
+ return await transcript(url, "https://go.linkfly.in", "https://techyblogs.in/", 4)
1905
+ elif "mdiskshortner" in url:
1906
+ return await transcript(url, "https://loans.yosite.net", "https://yosite.net", 10)
1907
+ elif "narzolinks" in url:
1908
+ return await transcript(url, "https://go.narzolinks.click/", "https://hydtech.in/", 5)
1909
+ elif "earn2me" in url:
1910
+ return await transcript(url, "https://blog.filepresident.com/", "https://easyworldbusiness.com/", 5)
1911
+ elif "adsfly" in url:
1912
+ return await transcript(url, "https://go.adsfly.in/", "https://loans.quick91.com/", 5)
1913
+ elif "link4earn" in url:
1914
+ return await transcript(url, "https://link4earn.com", "https://studyis.xyz/", 5)
1915
+ elif "pdisk.site" in url:
1916
+ return await transcript(url, "https://go.moneycase.link", "https://go.moneycase.link", 2)
1917
+ elif "link.tnshort.net/" in url or "link.tnlink.net/" in url:
1918
+ return await transcript(url, "https://go.tnshort.net", "https://market.finclub.in", 10)
1919
+ elif "ziplinker.net" in url:
1920
+ return await transcript(url,'https://ziplinker.net/web','https://ontechhindi.com/',0.04)
1921
+ elif "urllinkshort.in" in url:
1922
+ return await transcript(url,'https://web.urllinkshort.in/','https://suntechu.in/',4.5)
1923
+ elif "kpslink.in" in url:
1924
+ return await transcript(url, "https://get.infotamizhan.xyz/", "https://infotamizhan.xyz/", 5)
1925
+ elif "v2.kpslink.in" in url:
1926
+ return await transcript(url, "https://v2download.kpslink.in/", "https://infotamizhan.xyz/", 5)
1927
+ elif "go.lolshort" in url:
1928
+ return await transcript(url, "https://get.lolshort.tech/", "https://tech.animezia.com/", 8)
1929
+ elif "go.lolshort" in url:
1930
+ return await transcript(url, "https://blog.vllinks.in", "https://vlnewshd.in/", 8)
1931
+ elif "onepagelink" in url:
1932
+ return await transcript(url, "https://go.onepagelink.in/", "https://gorating.in/", 0.9)
1933
+ elif "pkin" in url:
1934
+ return await transcript(url, "https://go.paisakamalo.in", "https://techkeshri.com/", 9)
1935
+ elif "shrinke" in url:
1936
+ return await transcript(url, "https://en.shrinke.me/", "https://themezon.net/", 15)
1937
+ elif "mplaylink" in url:
1938
+ return await transcript(url, "https://tera-box.cloud/", "https://mvplaylink.in.net/", 0.5)
1939
+ elif "ewlink" in url:
1940
+ return await transcript(url, "https://ewlink.xyz/", "https://rxfastrx.com/", 0)
1941
+ elif "sklinks" in url:
1942
+ return await transcript(url, "https://sklinks.in", "https://sklinks.in/", 4.5)
1943
+ elif "dalink" in url:
1944
+ return await transcript(url, "https://get.tamilhit.tech/X/LOG-E/", "https://www.tamilhit.tech/", 8)
1945
+ elif "rocklinks" in url:
1946
+ return await transcript(url, "https://insurance.techymedies.com/", "https://highkeyfinance.com/", 5)
1947
+ elif "short_jambo" in url:
1948
+ return await transcript(url, "https://short-jambo.com/","https://1.newworldnew.com/",0.7)
1949
+ elif "ez4short" in url:
1950
+ return await transcript(url, "https://ez4short.com/","https://ez4mods.com/",5)
1951
+ elif "shortingly.com" in url:
1952
+ return await transcript(url,"https://go.blogytube.com/","https://blogytube.com/",1)
1953
+ elif "https://gyanilinks.com/" in url or "https://gtlinks.me/" in url:
1954
+ return await transcript(url,"https://go.hipsonyc.com","https://earn.hostadviser.net",5)
1955
+ elif "https://flashlinks.in/" in url:
1956
+ return await transcript(url,"https://flashlinks.in", "https://flashlinks.online/",13)
1957
+ elif "urlsopen" in url:
1958
+ return await transcript(url, "https://s.humanssurvival.com/", "https://1topjob.xyz/", 1)
1959
+ elif "xpshort" in url:
1960
+ return f"Can't Bypass.Invisible captcha"
1961
+ #return await transcript(url, "https://techymozo.com/", "https://portgyaan.in/", 0)
1962
+ elif "go.moonlinks.in/" in url:
1963
+ return await transcript(url, "https://go.moonlinks.in/", "https://www.akcartoons.in/", 7)
1964
+ elif "vivdisk" in url:
1965
+ return await transcript(url, "https://tinyfy.in/", "https://web.yotrickslog.tech/", 0)
1966
+ elif "https://krownlinks.me" in url:
1967
+ return await transcript(url, "https://go.hostadviser.net/", "https://blog.hostadviser.net/", 8)
1968
+ elif "adrinolink" in url:
1969
+ return f'https://bhojpuritop.in/safe.php?link={url.split("/")[-1]}'#await transcript(url, "https://adrinolinks.in/", "https://amritadrino.com", 8)
1970
+ elif "mdiskshortner" in url:
1971
+ return await transcript(url, "https://mdiskshortner.link", "https://m.proappapk.com", 2)
1972
+ elif "tiny" in url:
1973
+ return await transcript(url, "https://tinyfy.in", "https://www.yotrickslog.tech", 0)
1974
+ elif "earnl" in url:
1975
+ return await transcript(url, "https://v.earnl.xyz", "https://link.modmakers.xyz", 5)
1976
+ elif "moneykamalo" in url:
1977
+ return await transcript(url, "https://go.moneykamalo.com", "https://blog.techkeshri.com", 5)
1978
+ elif "v2links" in url:
1979
+ return await transcript(url, "https://vzu.us", "https://gadgetsreview27.com", 15)
1980
+ elif "tnvalue" in url:
1981
+ return await transcript(url, "https://get.tnvalue.in/", "https://finclub.in", 8)
1982
+ elif "omnifly" in url:
1983
+ return await transcript(url, "https://f.omnifly.in.net/", "https://f.omnifly.in.net/", 8)
1984
+ elif "indianshortner" in url:
1985
+ return await transcript(url, "https://indianshortner.com/", "https://moddingzone.in", 5)
1986
+ elif "indianshortner" in url:
1987
+ return await transcript(url, "https://techy.veganab.co", "https://veganab.co/", 8)
1988
+ elif "indi" in url:
1989
+ return await transcript(url, "https://file.earnash.com/", "https://indiurl.cordtpoint.co.in/", 10)
1990
+ elif "linkbnao" in url:
1991
+ return await transcript(url, "https://vip.linkbnao.com", "https://ffworld.xyz/", 2)
1992
+ elif "mdiskpro" in url:
1993
+ return await transcript(url, "https://mdisk.pro", "https://www.meclipstudy.in", 8)
1994
+ elif "omegalinks" in url:
1995
+ return await transcript(url, "https://tera-box.com", "https://m.meclipstudy.in", 8)
1996
+ elif "mdisklink" in url:
1997
+ return await transcript(url, "https://powerlinkz.in", "https://powerlinkz.in", 2)
1998
+ elif "indshort" in url:
1999
+ return await transcript(url, "https://indianshortner.com", "https://moddingzone.in", 5)
2000
+ elif "indyshare" in url:
2001
+ return await transcript(url, "https://download.indyshare.net", "https://bestdjsong.com/", 15)
2002
+ elif "mdisklink" in url:
2003
+ return await transcript(url, "https://gotolink.mdisklink.link/", "https://loans.yosite.net/", 2)
2004
+ elif "tamizhmasters" in url:
2005
+ return await transcript(url, "https://tamizhmasters.com/", "https://pokgames.com/", 5)
2006
+ elif "vipurl" in url:
2007
+ return await transcript(url, "https://count.vipurl.in/", "https://awuyro.com/", 8)
2008
+ elif "linksly" in url:
2009
+ return await transcript(url, "https://go.linksly.co", "https://en.themezon.net/", 10)
2010
+ elif "link1s" in url:
2011
+ return await transcript(url,'https://link1s.net','https://nguyenvanbao.com/',0)
2012
+ elif "sxslink" in url:
2013
+ return await transcript(url, "https://getlink.sxslink.com/", "https://cinemapettai.in/", 5)
2014
+ elif "urlspay.in" in url:
2015
+ return await transcript(url, "https://finance.smallinfo.in/", "https://loans.techyinfo.in/", 5)
2016
+ elif "linkpays.in" in url:
2017
+ return await transcript(url, "https://tech.smallinfo.in/Gadget/", "https://loan.insuranceinfos.in/", 5)
2018
+ elif "seturl.in" in url:
2019
+ return await transcript(url,'https://set.seturl.in/','https://earn.petrainer.in/',5)
2020
+ else: return "Not in Supported Sites"
2021
+
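+ # Minimal driver sketch; the short link is hypothetical, and `uid` must be in
+ # scope for the publicearn branch.
+ if __name__ == "__main__":
+     from asyncio import run
+     print(run(shortners("https://bit.ly/3abcdef")))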
2022
+ ################################################################################################################################
database.py ADDED
@@ -0,0 +1,43 @@
1
+ #(c) Adarsh-Goel
2
+ import datetime
3
+ import motor.motor_asyncio
4
+
5
+
6
+ class Database:
7
+ def __init__(self, uri, database_name):
8
+ self._client = motor.motor_asyncio.AsyncIOMotorClient(uri)
9
+ self.db = self._client[database_name]
10
+ self.col = self.db.users
11
+
12
+ def new_user(self, id):
13
+ return dict(
14
+ id=id,
15
+ join_date=datetime.date.today().isoformat()
16
+ )
17
+
18
+ async def add_user(self, id):
19
+ user = self.new_user(id)
20
+ await self.col.insert_one(user)
21
+
22
+     async def add_user_pass(self, id, ag_pass):
+         # only insert a fresh user document on first use; otherwise just update
+         if not await self.is_user_exist(id):
+             await self.add_user(int(id))
+         await self.col.update_one({'id': int(id)}, {'$set': {'ag_p': ag_pass}})
25
+
26
+ async def get_user_pass(self, id):
27
+ user_pass = await self.col.find_one({'id': int(id)})
28
+ return user_pass.get("ag_p", None) if user_pass else None
29
+
30
+ async def is_user_exist(self, id):
31
+ user = await self.col.find_one({'id': int(id)})
32
+ return bool(user)
33
+
34
+ async def total_users_count(self):
35
+ count = await self.col.count_documents({})
36
+ return count
37
+
38
+ async def get_all_users(self):
39
+ all_users = self.col.find({})
40
+ return all_users
41
+
42
+ async def delete_user(self, user_id):
43
+ await self.col.delete_many({'id': int(user_id)})
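+ 
+ # Minimal smoke test; the URI, database name, and sample id are placeholders.
+ if __name__ == "__main__":
+     from asyncio import run
+ 
+     async def _demo():
+         db = Database("mongodb://localhost:27017", "bypassbot")
+         await db.add_user_pass(123456789, "hunter2")
+         print(await db.get_user_pass(123456789))  # -> hunter2
+ 
+     run(_demo())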
ddl.py ADDED
@@ -0,0 +1,865 @@
1
+ from base64 import standard_b64encode
2
+ from json import loads
3
+ from math import floor, pow
4
+ from os import environ
5
+ from re import findall, match, search, sub
6
+ from time import sleep
7
+ from urllib.parse import quote, unquote, urlparse
8
+ from uuid import uuid4
9
+ import aiohttp
10
+ from bs4 import BeautifulSoup
11
+ from cfscrape import create_scraper
12
+ from lk21 import Bypass
13
+ from lxml import etree
14
+ from requests import get,session
15
+ import requests
16
+
17
+ GDTOT_CRYPT = environ.get("CRYPT","b0lDek5LSCt6ZjVRR2EwZnY4T1EvVndqeDRtbCtTWmMwcGNuKy8wYWpDaz0%3D")
18
+ UPTOBOX_TOKEN = environ.get("UPTOBOX_TOKEN","4a4ecf35552fea876da1d63e7fd000d2cb2fo")
19
+ ndus = environ.get("TERA_COOKIE", "YyWQSuHteHuijCfIgBiScxzV7iD2JVex_Wvd3KlE")  # alt: YQOR7exteHuiC7XNl_TAD_ZaXGexSokJJwoblC4S
+ TERA_COOKIE = {"ndus": ndus} if ndus else None
22
+
23
+
24
+ ddllist = ['yadi.sk','disk.yandex.com','mediafire.com','uptobox.com','osdn.net','github.com',
25
+ 'hxfile.co','1drv.ms','pixeldrain.com','antfiles.com','streamtape','racaty','1fichier.com',
26
+ 'solidfiles.com','krakenfiles.com','mdisk.me','upload.ee','akmfiles','linkbox','shrdsk','letsupload.io',
27
+ 'zippyshare.com','wetransfer.com','we.tl','terabox','nephobox','4funbox','mirrobox','momerybox',
28
+ 'teraboxapp','sbembed.com','watchsb.com','streamsb.net','sbplay.org','filepress',
29
+ 'fembed.net', 'fembed.com', 'femax20.com', 'fcdn.stream', 'feurl.com', 'layarkacaxxi.icu',
30
+ 'naniplay.nanime.in', 'naniplay.nanime.biz', 'naniplay.com', 'mm9842.com','anonfiles.com',
31
+ 'hotfile.io', 'bayfiles.com', 'megaupload.nz', 'letsupload.cc','filechan.org', 'myfile.is',
32
+ 'vshare.is', 'rapidshare.nu', 'lolabits.se','openload.cc', 'share-online.is', 'upvid.cc','pressbee']
33
+
34
+
35
+ def is_share_link(url):
36
+ return bool(match(r'https?:\/\/.+\.gdtot\.\S+|https?:\/\/(filepress|filebee|appdrive|gdflix|driveseed|new.filepress|new2.filepress|new3.filepress|new4.filepress|pressbee)\.\S+', url))
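+ # e.g. is_share_link("https://new.gdtot.cfd/file/123") -> True  (hypothetical link)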
37
+ class DDLException(Exception):
38
+ """Not method found for extracting direct download link from the http link"""
39
+ pass
40
+
41
+ def get_readable_time(seconds):
42
+ result = ''
43
+ (days, remainder) = divmod(seconds, 86400)
44
+ days = int(days)
45
+ if days != 0:
46
+ result += f'{days}d'
47
+ (hours, remainder) = divmod(remainder, 3600)
48
+ hours = int(hours)
49
+ if hours != 0:
50
+ result += f'{hours}h'
51
+ (minutes, seconds) = divmod(remainder, 60)
52
+ minutes = int(minutes)
53
+ if minutes != 0:
54
+ result += f'{minutes}m'
55
+ seconds = int(seconds)
56
+ result += f'{seconds}s'
57
+ return result
58
+
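+ # e.g. get_readable_time(93784) -> '1d2h3m4s'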
59
+
60
+ fmed_list = ['fembed.net', 'fembed.com', 'femax20.com', 'fcdn.stream', 'feurl.com', 'layarkacaxxi.icu',
61
+ 'naniplay.nanime.in', 'naniplay.nanime.biz', 'naniplay.com', 'mm9842.com']
62
+
63
+ anonfilesBaseSites = ['anonfiles.com', 'hotfile.io', 'bayfiles.com', 'megaupload.nz', 'letsupload.cc',
64
+ 'filechan.org', 'myfile.is', 'vshare.is', 'rapidshare.nu', 'lolabits.se',
65
+ 'openload.cc', 'share-online.is', 'upvid.cc']
66
+
67
+ async def seconds_to_hhmmss(seconds):
68
+ hours = int(seconds // 3600)
69
+ minutes = int((seconds % 3600) // 60)
70
+ seconds = int(seconds % 60)
71
+ return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
72
+
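+ # e.g. await seconds_to_hhmmss(3725) -> '01:02:05'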
73
+
74
+ async def humanbytes(size):
75
76
+ if not size:
77
+ return ""
78
+ power = 2**10
79
+ n = 0
80
+ Dic_powerN = {0: ' ', 1: 'Ki', 2: 'Mi', 3: 'Gi', 4: 'Ti'}
81
+ while size > power:
82
+ size /= power
83
+ n += 1
84
+ return str(round(size, 2)) + " " + Dic_powerN[n] + 'B'
85
+
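+ # e.g. await humanbytes(1536) -> '1.5 KiB'; falsy sizes return ''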
86
+
87
+ async def direct_link_generator(link: str):
88
+ """ direct links generator """
89
+ domain = urlparse(link).hostname
90
+ if 'yadi.sk' in domain or 'disk.yandex.com' in domain:
91
+ return yandex_disk(link)
92
+ elif 'mediafire.com' in domain:
93
+ return mediafire(link)
94
+ elif 'uptobox.com' in domain:
95
+ return uptobox(link)
96
+ elif 'osdn.net' in domain:
97
+ return osdn(link)
98
+ elif 'github.com' in domain:
99
+ return github(link)
100
+ elif 'hxfile.co' in domain:
101
+ return hxfile(link)
102
+ elif '1drv.ms' in domain:
103
+ return onedrive(link)
104
+ elif 'pixeldrain.com' in domain:
105
+ return pixeldrain(link)
106
+ elif 'antfiles.com' in domain:
107
+ return antfiles(link)
108
+ elif 'streamtape' in domain:
109
+ return streamtape(link)
110
+ elif 'racaty' in domain:
111
+ return racaty(link)
112
+ elif '1fichier.com' in domain:
113
+ return fichier(link)
114
+ elif 'solidfiles.com' in domain:
115
+ return solidfiles(link)
116
+ elif 'krakenfiles.com' in domain:
117
+ return krakenfiles(link)
118
+ elif 'upload.ee' in domain:
119
+ return uploadee(link)
120
+ elif 'akmfiles' in domain:
121
+ return akmfiles(link)
122
+ elif 'linkbox' in domain:
123
+ return linkbox(link)
124
+ elif 'shrdsk' in domain:
125
+ return shrdsk(link)
126
+ elif 'letsupload.io' in domain:
127
+ return letsupload(link)
128
+ elif 'zippyshare.com' in domain:
129
+ return zippyshare(link)
130
+ elif 'mdisk.me' in domain:
131
+ return mdisk(link)
132
+ elif any(x in domain for x in ['wetransfer.com', 'we.tl']):
133
+ return wetransfer(link)
134
+ elif any(x in domain for x in anonfilesBaseSites):
135
+ return anonfilesBased(link)
136
+ elif any(x in domain for x in ['terabox', 'nephobox', '4funbox', 'mirrobox', 'momerybox', 'teraboxapp']):
137
+ return terabox(link)
138
+ elif any(x in domain for x in fmed_list):
139
+ return fembed(link)
140
+ elif any(x in domain for x in ['sbembed.com', 'watchsb.com', 'streamsb.net', 'sbplay.org']):
141
+ return sbembed(link)
142
+ elif is_share_link(link):
143
+ if 'gdtot' in domain:
144
+ return gdtot(link)
145
+ elif 'filepress' in domain or 'new2.filepress.store' in domain or 'pressbee' in domain or 'new3.filepress' in domain or 'new4.filepress' in domain:
146
+ return await filepress(link)
147
+ else:
148
+ return sharer_scraper(link)
149
+ else:
150
+ return f'No Direct link function found for\n\n{link}\n\nuse /ddllist'
151
+
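+ # e.g. await direct_link_generator("https://pixeldrain.com/u/abc123")  # hypothetical id
+ # routes to pixeldrain(); unsupported hosts return the /ddllist hint instead.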
152
+
153
+ def mdisk(url):
154
+ header = {
155
+ 'Accept': '*/*',
156
+ 'Accept-Language': 'en-US,en;q=0.5',
157
+ 'Accept-Encoding': 'gzip, deflate, br',
158
+ 'Referer': 'https://mdisk.me/',
159
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'
160
+ }
161
+ file_id = url.split("/")[-1]
+ URL = f'https://diskuploader.entertainvideo.com/v1/file/cdnurl?param={file_id}'
163
+ return get(url=URL, headers=header).json()['source']
164
+
165
+
166
+ def yandex_disk(url: str) -> str:
167
+ """ Yandex.Disk direct link generator
168
+ Based on https://github.com/wldhx/yadisk-direct """
169
+ try:
170
+ link = findall(r'\b(https?://(yadi.sk|disk.yandex.com)\S+)', url)[0][0]
171
+ except IndexError:
172
+ return "No Yandex.Disk links found\n"
173
+ api = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
174
+ cget = create_scraper().request
175
+ try:
176
+ return cget('get', api.format(link)).json()['href']
177
+ except KeyError:
178
+ return (
179
+ "ERROR: File not found/Download limit reached")
180
+
181
+
182
+ def uptobox(url: str) -> str:
183
+ """ Uptobox direct link generator
184
+ based on https://github.com/jovanzers/WinTenCermin and https://github.com/sinoobie/noobie-mirror """
185
+ try:
186
+ link = findall(r'\bhttps?://.*uptobox\.com\S+', url)[0]
187
+ except IndexError:
188
+ return ("No Uptobox links found")
189
+ link = findall(r'\bhttps?://.*\.uptobox\.com/dl\S+', url)
190
+ if link: return link[0]
191
+ cget = create_scraper().request
192
+ try:
193
+ file_id = findall(r'\bhttps?://.*uptobox\.com/(\w+)', url)[0]
194
+ if UPTOBOX_TOKEN:
195
+ file_link = f'https://uptobox.com/api/link?token={UPTOBOX_TOKEN}&file_code={file_id}'
196
+ else:
197
+ file_link = f'https://uptobox.com/api/link?file_code={file_id}'
198
+ res = cget('get', file_link).json()
199
+ except Exception as e:
200
+ return (f"ERROR: {e.__class__.__name__}")
201
+ if res['statusCode'] == 0:
202
+ return res['data']['dlLink']
203
+ elif res['statusCode'] == 16:
204
+ sleep(1)
205
+ waiting_token = res["data"]["waitingToken"]
206
+ sleep(res["data"]["waiting"])
207
+ elif res['statusCode'] == 39:
208
+ return (
209
+ f"ERROR: Uptobox is being limited please wait {get_readable_time(res['data']['waiting'])}")
210
+ else:
211
+ return (f"ERROR: {res['message']}")
212
+ try:
213
+ res = cget('get', f"{file_link}&waitingToken={waiting_token}").json()
214
+ return res['data']['dlLink']
215
+ except Exception as e:
216
+ return (f"ERROR: {e.__class__.__name__}")
217
+
218
+
219
+ def mediafire(url: str) -> str:
220
+ final_link = findall(r'https?:\/\/download\d+\.mediafire\.com\/\S+\/\S+\/\S+', url)
221
+ if final_link: return final_link[0]
222
+ cget = create_scraper().request
223
+ try:
224
+ url = cget('get', url).url
225
+ page = cget('get', url).text
226
+ except Exception as e:
227
+ return (f"ERROR: {e.__class__.__name__}")
228
+ final_link = findall(r"\'(https?:\/\/download\d+\.mediafire\.com\/\S+\/\S+\/\S+)\'", page)
229
+ if not final_link:return ("ERROR: No links found in this page")
230
+ return final_link[0]
231
+
232
+
233
+ def osdn(url: str) -> str:
234
+ """ OSDN direct link generator """
235
+ osdn_link = 'https://osdn.net'
236
+ try:
237
+ link = findall(r'\bhttps?://.*osdn\.net\S+', url)[0]
238
+ except IndexError:
239
+ return ("No OSDN links found")
240
+ cget = create_scraper().request
241
+ try:
242
+ page = BeautifulSoup(
243
+ cget('get', link, allow_redirects=True).content, 'lxml')
244
+ except Exception as e:
245
+ return (f"ERROR: {e.__class__.__name__}")
246
+ info = page.find('a', {'class': 'mirror_link'})
247
+ link = unquote(osdn_link + info['href'])
248
+ mirrors = page.find('form', {'id': 'mirror-select-form'}).findAll('tr')
249
+ urls = []
250
+ for data in mirrors[1:]:
251
+ mirror = data.find('input')['value']
252
+ urls.append(sub(r'm=(.*)&f', f'm={mirror}&f', link))
253
+ return urls[0]
254
+
255
+
256
+ def github(url: str) -> str:
257
+ """ GitHub direct links generator """
258
+ try:
259
+ findall(r'\bhttps?://.*github\.com.*releases\S+', url)[0]
260
+ except IndexError:
261
+ return ("No GitHub Releases links found")
262
+ cget = create_scraper().request
263
+ download = cget('get', url, stream=True, allow_redirects=False)
264
+ try:
265
+ return download.headers["location"]
266
+ except KeyError:
267
+ return ("ERROR: Can't extract the link")
268
+
269
+
270
+ def hxfile(url: str) -> str:
271
+ """ Hxfile direct link generator
272
+ Based on https://github.com/zevtyardt/lk21
273
+ """
274
+ try:
275
+ return Bypass().bypass_filesIm(url)
276
+ except Exception as e:
277
+ return (f"ERROR: {e.__class__.__name__}")
278
+
279
+
280
+ def letsupload(url: str) -> str:
281
+ cget = create_scraper().request
282
+ try:
283
+ res = cget("POST", url)
284
+ except Exception as e:
285
+ return (f'ERROR: {e.__class__.__name__}')
286
+ direct_link = findall(r"(https?://letsupload\.io\/.+?)\'", res.text)
287
+ if direct_link: return direct_link[0]
288
+ else:
289
+ return ('ERROR: Direct Link not found')
290
+
291
+
292
+ def anonfilesBased(url: str) -> str:
293
+ cget = create_scraper().request
294
+ try:
295
+ soup = BeautifulSoup(cget('get', url).content, 'lxml')
296
+ except Exception as e:
297
+ return (f"ERROR: {e.__class__.__name__}")
298
+ sa = soup.find(id="download-url")
299
+ if sa: return sa['href']
300
+ return ("ERROR: File not found!")
301
+
302
+
303
+ def fembed(link: str) -> str:
304
+ """ Fembed direct link generator
305
+ Based on https://github.com/zevtyardt/lk21
306
+ """
307
+ try:
308
+ dl_url = Bypass().bypass_fembed(link)
309
+ count = len(dl_url)
310
+ lst_link = [dl_url[i] for i in dl_url]
311
+ return lst_link[count-1]
312
+ except Exception as e:
313
+ return (f"ERROR: {e.__class__.__name__}")
314
+
315
+
316
+ def sbembed(link: str) -> str:
317
+ """ Sbembed direct link generator
318
+ Based on https://github.com/zevtyardt/lk21
319
+ """
320
+ try:
321
+ dl_url = Bypass().bypass_sbembed(link)
322
+ count = len(dl_url)
323
+ lst_link = [dl_url[i] for i in dl_url]
324
+ return lst_link[count-1]
325
+ except Exception as e:
326
+ return (f"ERROR: {e.__class__.__name__}")
327
+
328
+
329
+ def onedrive(link: str) -> str:
330
+ """ Onedrive direct link generator
331
+ Based on https://github.com/UsergeTeam/Userge """
332
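+ # Drop the query string, base64-encode the share URL and hit the public
+ # shares API as "u!<encoded>"; a 302 redirect then carries the file URL.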
+ link_without_query = urlparse(link)._replace(query=None).geturl()
333
+ direct_link_encoded = str(standard_b64encode(
334
+ bytes(link_without_query, "utf-8")), "utf-8")
335
+ direct_link1 = f"https://api.onedrive.com/v1.0/shares/u!{direct_link_encoded}/root/content"
336
+ cget = create_scraper().request
337
+ try:
338
+ resp = cget('head', direct_link1)
339
+ except Exception as e:
340
+ return (f"ERROR: {e.__class__.__name__}")
341
+ if resp.status_code != 302:
342
+ return (
343
+ "ERROR: Unauthorized link, the link may be private")
344
+ return resp.next.url
345
+
346
+
347
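+ # List links (/l/<id>) resolve to the list API's zip endpoint; single files
+ # resolve to /api/file/<id> once the info endpoint confirms they exist.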
+ def pixeldrain(url: str) -> str:
348
+ """ Based on https://github.com/yash-dk/TorToolkit-Telegram """
349
+ url = url.strip("/ ")
350
+ file_id = url.split("/")[-1]
351
+ if url.split("/")[-2] == "l":
352
+ info_link = f"https://pixeldrain.com/api/list/{file_id}"
353
+ dl_link = f"https://pixeldrain.com/api/list/{file_id}/zip"
354
+ else:
355
+ info_link = f"https://pixeldrain.com/api/file/{file_id}/info"
356
+ dl_link = f"https://pixeldrain.com/api/file/{file_id}"
357
+ cget = create_scraper().request
358
+ try:
359
+ resp = cget('get', info_link).json()
360
+ except Exception as e:
361
+ return (f"ERROR: {e.__class__.__name__}")
362
+ if resp["success"]:
363
+ return dl_link
364
+ else:
365
+ return (
366
+ f"ERROR: Cant't download due {resp['message']}.")
367
+
368
+
369
+ def antfiles(url: str) -> str:
370
+ """ Antfiles direct link generator
371
+ Based on https://github.com/zevtyardt/lk21
372
+ """
373
+ try:
374
+ return Bypass().bypass_antfiles(url)
375
+ except Exception as e:
376
+ return (f"ERROR: {e.__class__.__name__}")
377
+
378
+
379
+ def streamtape(url: str) -> str:
380
+ response = get(url)
381
+
382
+ if (videolink := findall(r"document.*((?=id\=)[^\"']+)", response.text)):
383
+ nexturl = "https://streamtape.com/get_video?" + videolink[-1]
384
+ return nexturl
385
+ return ("ERROR: Direct link not found")
386
+
387
+
388
+ def racaty(url: str) -> str:
389
+ """ Racaty direct link generator
390
+ By https://github.com/junedkh """
391
+ cget = create_scraper().request
392
+ try:
393
+ url = cget('GET', url).url
394
+ json_data = {
395
+ 'op': 'download2',
396
+ 'id': url.split('/')[-1]
397
+ }
398
+ res = cget('POST', url, data=json_data)
399
+ except Exception as e:
400
+ return (f'ERROR: {e.__class__.__name__}')
401
+ html_tree = etree.HTML(res.text)
402
+ direct_link = html_tree.xpath("//a[contains(@id,'uniqueExpirylink')]/@href")
403
+ if direct_link:
404
+ return direct_link[0]
405
+ else:
406
+ return ('ERROR: Direct link not found')
407
+
408
+
409
+ def fichier(link: str) -> str:
410
+ """ 1Fichier direct link generator
411
+ Based on https://github.com/Maujar
412
+ """
413
+ regex = r"^([http:\/\/|https:\/\/]+)?.*1fichier\.com\/\?.+"
414
+ gan = match(regex, link)
415
+ if not gan:
416
+ return (
417
+ "ERROR: The link you entered is wrong!")
418
+ if "::" in link:
419
+ pswd = link.split("::")[-1]
420
+ url = link.split("::")[-2]
421
+ else:
422
+ pswd = None
423
+ url = link
424
+ cget = create_scraper().request
425
+ try:
426
+ if pswd is None:
427
+ req = cget('post', url)
428
+ else:
429
+ pw = {"pass": pswd}
430
+ req = cget('post', url, data=pw)
431
+ except Exception as e:
432
+ return (f"ERROR: {e.__class__.__name__}")
433
+ if req.status_code == 404:
434
+ return (
435
+ "ERROR: File not found/The link you entered is wrong!")
436
+ soup = BeautifulSoup(req.content, 'lxml')
437
+ if soup.find("a", {"class": "ok btn-general btn-orange"}):
438
+ dl_url = soup.find("a", {"class": "ok btn-general btn-orange"})["href"]
439
+ if dl_url: return dl_url
440
+ return (
441
+ "ERROR: Unable to generate Direct Link 1fichier!")
442
+ elif len(soup.find_all("div", {"class": "ct_warn"})) == 3:
443
+ str_2 = soup.find_all("div", {"class": "ct_warn"})[-1]
444
+ if "you must wait" in str(str_2).lower():
445
+ numbers = [int(word) for word in str(str_2).split() if word.isdigit()]
446
+ if numbers: return (
447
+ f"ERROR: 1fichier is on a limit. Please wait {numbers[0]} minute.")
448
+ else:
449
+ return (
450
+ "ERROR: 1fichier is on a limit. Please wait a few minutes/hour.")
451
+ elif "protect access" in str(str_2).lower():
452
+ return (
453
+ "ERROR: This link requires a password!\n\n<b>This link requires a password!</b>\n- Insert sign <b>::</b> after the link and write the password after the sign.\n\n<b>Example:</b> https://1fichier.com/?smmtd8twfpm66awbqz04::love you\n\n* No spaces between the signs <b>::</b>\n* For the password, you can use a space!")
454
+ else:
455
+ return (
456
+ "ERROR: Failed to generate Direct Link from 1fichier!")
457
+ elif len(soup.find_all("div", {"class": "ct_warn"})) == 4:
458
+ str_1 = soup.find_all("div", {"class": "ct_warn"})[-2]
459
+ str_3 = soup.find_all("div", {"class": "ct_warn"})[-1]
460
+ if "you must wait" in str(str_1).lower():
461
+ numbers = [int(word) for word in str(str_1).split() if word.isdigit()]
462
+ if numbers: return (
463
+ f"ERROR: 1fichier is on a limit. Please wait {numbers[0]} minute.")
464
+ else:
465
+ return (
466
+ "ERROR: 1fichier is on a limit. Please wait a few minutes/hour.")
467
+ elif "bad password" in str(str_3).lower():
468
+ return (
469
+ "ERROR: The password you entered is wrong!")
470
+ else:
471
+ return (
472
+ "ERROR: Error trying to generate Direct Link from 1fichier!")
473
+ else:
474
+ return (
475
+ "ERROR: Error trying to generate Direct Link from 1fichier!")
476
+
477
+
478
+ def solidfiles(url: str) -> str:
479
+ """ Solidfiles direct link generator
480
+ Based on https://github.com/Xonshiz/SolidFiles-Downloader
481
+ By https://github.com/Jusidama18 """
482
+ cget = create_scraper().request
483
+ try:
484
+ headers = {
485
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36'
486
+ }
487
+ pageSource = cget('get', url, headers=headers).text
488
+ mainOptions = str(
489
+ search(r'viewerOptions\'\,\ (.*?)\)\;', pageSource).group(1))
490
+ return loads(mainOptions)["downloadUrl"]
491
+ except Exception as e:
492
+ return (f"ERROR: {e.__class__.__name__}")
493
+
494
+
495
+ def krakenfiles(page_link: str) -> str:
496
+ """ krakenfiles direct link generator
497
+ Based on https://github.com/tha23rd/py-kraken
498
+ By https://github.com/junedkh """
499
+ cget = create_scraper().request
500
+ try:
501
+ page_resp = cget('get', page_link)
502
+ except Exception as e:
503
+ return (f"ERROR: {e.__class__.__name__}")
504
+ soup = BeautifulSoup(page_resp.text, "lxml")
505
+ try:
506
+ token = soup.find("input", id="dl-token")["value"]
507
+ except:
508
+ return (
509
+ f"ERROR: Page link is wrong: {page_link}")
510
+ hashes = [
511
+ item["data-file-hash"]
512
+ for item in soup.find_all("div", attrs={"data-file-hash": True})
513
+ ]
514
+ if not hashes:
515
+ return (
516
+ f"ERROR: Hash not found for : {page_link}")
517
+ dl_hash = hashes[0]
518
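+ # Hand-rolled multipart body: a fixed WebKitFormBoundary wraps the page's
+ # dl token, while the file hash travels in a custom 'hash' header.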
+ payload = f'------WebKitFormBoundary7MA4YWxkTrZu0gW\r\nContent-Disposition: form-data; name="token"\r\n\r\n{token}\r\n------WebKitFormBoundary7MA4YWxkTrZu0gW--'
519
+ headers = {
520
+ "content-type": "multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW",
521
+ "cache-control": "no-cache",
522
+ "hash": dl_hash,
523
+ }
524
+ dl_link_resp = cget(
525
+ 'post', f"https://krakenfiles.com/download/{hash}", data=payload, headers=headers)
526
+ dl_link_json = dl_link_resp.json()
527
+ if "url" in dl_link_json:
528
+ return dl_link_json["url"]
529
+ else:
530
+ return (
531
+ f"ERROR: Failed to acquire download URL from kraken for : {page_link}")
532
+
533
+
534
+ def uploadee(url: str) -> str:
535
+ """ uploadee direct link generator
536
+ By https://github.com/iron-heart-x"""
537
+ cget = create_scraper().request
538
+ try:
539
+ soup = BeautifulSoup(cget('get', url).content, 'lxml')
540
+ sa = soup.find('a', attrs={'id': 'd_l'})
541
+ return sa['href']
542
+ except:
543
+ return (
544
+ f"ERROR: Failed to acquire download URL from upload.ee for : {url}")
545
+
546
+
547
+ def terabox(url) -> str:
548
+ sess = session()
549
+ while True:
550
+ try:
551
+ res = sess.get(url)
552
+ print("connected")
553
+ break
554
+ except: print("retrying")
555
+ url = res.url
556
+
557
+ key = url.split('?surl=')[-1]
558
+ url = f'http://www.terabox.com/wap/share/filelist?surl={key}'
559
+ sess.cookies.update(TERA_COOKIE)
560
+
561
+ while True:
562
+ try:
563
+ res = sess.get(url)
564
+ print("connected")
565
+ break
566
+ except Exception as e: print("retrying")
567
+
568
+ key = res.url.split('?surl=')[-1]
569
+ soup = BeautifulSoup(res.content, 'lxml')
570
+ jsToken = None
571
+
572
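+ # The share page hides jsToken inside a 'try {eval(decodeURIComponent(...))}'
+ # script blob; splitting on %22 (an encoded quote) pulls the token out.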
+ for fs in soup.find_all('script'):
573
+ fstring = fs.string
574
+ if fstring and fstring.startswith('try {eval(decodeURIComponent'):
575
+ jsToken = fstring.split('%22')[1]
576
+
577
+ while True:
578
+ try:
579
+ res = sess.get(f'https://www.terabox.com/share/list?app_id=250528&jsToken={jsToken}&shorturl={key}&root=1')
580
+ print("connected")
581
+ break
582
+ except: print("retrying")
583
+ result = res.json()
584
+
585
+ if result['errno'] != 0: return f"ERROR: '{result['errmsg']}' Check cookies"
586
+ result = result['list']
587
+ if len(result) > 1: return "ERROR: Can't download multiple files"
588
+ result = result[0]
589
+
590
+ if result['isdir'] != '0':return "ERROR: Can't download folder"
591
+ return result.get('dlink',"Error")
592
+
593
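+ # filepress flow: resolve redirects to find the real host, POST the file id
+ # to the download endpoints for a Drive id and a Telegram token, then pull
+ # name/size/duration metadata from /api/file/get/<id>.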
+ async def filepress(url):
594
+ async with aiohttp.ClientSession() as session:
595
+ try:
596
+ async with session.get(url) as response:
597
+ url = str(response.url)
598
+ raw = urlparse(url)
599
+ json_data = {
600
+ 'id': raw.path.split('/')[-1],
601
+ 'method': 'publicDownlaod',
602
+ }
603
+ tapi = f'{raw.scheme}://{raw.hostname}/api/file/telegram/downlaod/'
604
+ api = f'{raw.scheme}://{raw.hostname}/api/file/downlaod/'
605
+ async with session.post(api, headers={'Referer': f'{raw.scheme}://{raw.hostname}'}, json=json_data) as api_response:
606
+ res = await api_response.json()
607
+ async with session.post(tapi, headers={'Referer': f'{raw.scheme}://{raw.hostname}'}, json=json_data) as tapi_response:
608
+ res1 = await tapi_response.json()
609
+ if res1.get('data',False):
610
+ telegram=f'https://tghub.xyz/?start={res1["data"]}'
611
+ else:telegram=None
612
+ async with session.get(f"{raw.scheme}://{raw.hostname}/api/file/get/{raw.path.split('/')[-1]}", headers={'Referer': f'{raw.scheme}://{raw.hostname}'}) as details_response:
613
+ details = await details_response.json()
614
+ details=details['data']
615
+ except Exception as e:
616
+ return f'ERROR: {str(e)}'
617
+ if 'data' not in res:
618
+ return 'Drive Link not found'
619
+ index = f'https://indexlink.mrprincebotz.workers.dev/direct.aspx?id={res["data"]}'
620
+ title = details['name']
621
+ size = await humanbytes(int(details['size']))
622
+ language = details['videoFileDetails']['audioLangList']
623
+ subtitles = details['videoFileDetails']['subLangList']
624
+ duration = None
625
+ try:
626
+ duration = await seconds_to_hhmmss(details['videoFileDetails']['duration'])
627
+ except:pass
628
+ return f'\n<b>✨ ᴛɪᴛʟᴇ:</b> {title}\n<b>📀 sɪᴢᴇ:</b> {size}\n<b>💫 ʟᴀɴɢᴜᴀɢᴇ:</b> {language}\n<b>💫 subtitles:</b> {subtitles}\n<b>⏰ ᴅᴜʀᴀᴛɪᴏɴ:</b> {duration} \n<a href="https://drive.google.com/uc?id={res["data"]}">ᴅʀɪᴠᴇ ʟɪɴᴋ</a> | <a href="{telegram}">ᴛᴇʟᴇɢʀᴀᴍ ʟɪɴᴋ</a> | <a href="{index}">ɪɴᴅᴇx ʟɪɴᴋ</a>'
629
+ def gdtot(url):
630
+ cget = cloudscraper.create_scraper(allow_brotli=False)
631
+ try:
632
+ url = cget.get(url).url
633
+ p_url = urlparse(url)
634
+ res = cget.post(f"{p_url.scheme}://{p_url.hostname}/ddl", data={'dl': str(url.split('/')[-1])})
635
+ except Exception as e:
636
+ return(f'{e.__class__.__name__}')
637
+ if (drive_link := findall(r"myDl\('(.*?)'\)", res.text)) and "drive.google.com" in drive_link[0]:
638
+ d_link = drive_link[0]
639
+ elif GDTOT_CRYPT:
640
+ cget.get(url, cookies={'crypt': GDTOT_CRYPT})
641
+ p_url = urlparse(url)
642
+ js_script = cget.post(f"{p_url.scheme}://{p_url.hostname}/dld", data={'dwnld': url.split('/')[-1]})
643
+ g_id = findall('gd=(.*?)&', js_script.text)
644
+ try:
645
+ decoded_id = b64decode(str(g_id[0])).decode('utf-8')
646
+ except:
647
+ return("Try in your browser, mostly file not found or user limit exceeded!")
648
+ d_link = f'https://drive.google.com/open?id={decoded_id}'
649
+ else:
650
+ return('Drive Link not found. Try in your browser! GDTOT_CRYPT not provided!')
651
+ soup = BeautifulSoup(cget.get(url).content, "html.parser")
652
+ parse_data = (soup.select('meta[property^="og:description"]')[0]['content']).replace('Download ' , '').rsplit('-', maxsplit=1)
653
+ parse_txt = f'''┎ <b>Name :</b> <i>{parse_data[0]}</i>
654
+ ┠ <b>Size :</b> <i>{parse_data[-1]}</i>
655
+
656
+ ┠ <b>GDToT Link :</b> {url}
657
+ '''
658
+ try:
659
+ res=cget.get(url)
660
+ if (tele_link := findall(r"myDl2\('(.*?)'\)", res.text)):
661
+ print(tele_link[0])
662
+ parse_txt += f"┖ <b>Telegram Link :</b> {tele_link[0]}\n"
663
+ except:pass
664
+ parse_txt += f'┠ <b>Index Link :</b> https://indexlink.mrprincebotz.workers.dev/direct.aspx?id={get_gdriveid(d_link)}\n'
665
+ parse_txt += f"┖ <b>Drive Link :</b> {d_link}"
666
+ return parse_txt
667
+
668
+
669
+ def sharer_scraper(url):
670
+ cget = create_scraper().request
671
+ try:
672
+ url = cget('GET', url).url
673
+ raw = urlparse(url)
674
+ header = {
675
+ "useragent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/7.0.548.0 Safari/534.10"}
676
+ res = cget('GET', url, headers=header)
677
+ except Exception as e:
678
+ return (f'ERROR: {e.__class__.__name__}')
679
+ key = findall('"key",\s+"(.*?)"', res.text)
680
+ if not key:
681
+ return ("ERROR: Key not found!")
682
+ key = key[0]
683
+ if not etree.HTML(res.content).xpath("//button[@id='drc']"):
684
+ return (
685
+ "ERROR: This link don't have direct download button")
686
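+ # Build the multipart form by hand: a random uuid4 boundary, the scraped
+ # page 'key', and an 'x-token' header that must match the site's hostname.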
+ boundary = uuid4()
687
+ headers = {
688
+ 'Content-Type': f'multipart/form-data; boundary=----WebKitFormBoundary{boundary}',
689
+ 'x-token': raw.hostname,
690
+ 'useragent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/7.0.548.0 Safari/534.10'
691
+ }
692
+
693
+ data = f'------WebKitFormBoundary{boundary}\r\nContent-Disposition: form-data; name="action"\r\n\r\ndirect\r\n' \
694
+ f'------WebKitFormBoundary{boundary}\r\nContent-Disposition: form-data; name="key"\r\n\r\n{key}\r\n' \
695
+ f'------WebKitFormBoundary{boundary}\r\nContent-Disposition: form-data; name="action_token"\r\n\r\n\r\n' \
696
+ f'------WebKitFormBoundary{boundary}--\r\n'
697
+ try:
698
+ res = cget("POST", url, cookies=res.cookies,
699
+ headers=headers, data=data).json()
700
+ except Exception as e:
701
+ return (f'ERROR: {e.__class__.__name__}')
702
+ if "url" not in res:
703
+ return (
704
+ 'ERROR: Drive Link not found, try in your browser')
705
+ if "drive.google.com" in res["url"]:
706
+ return res["url"]
707
+ try:
708
+ res = cget('GET', res["url"])
709
+ except Exception as e:
710
+ return (f'ERROR: {e.__class__.__name__}')
711
+ html_tree = etree.HTML(res.content)
712
+ drive_link = html_tree.xpath("//a[contains(@class,'btn')]/@href")
713
+ if drive_link and "drive.google.com" in drive_link[0]:
714
+ return drive_link[0]
715
+ else:
716
+ return (
717
+ 'ERROR: Drive Link not found, try in your browser')
718
+
719
+
720
+ def wetransfer(url):
721
+ cget = create_scraper().request
722
+ try:
723
+ url = cget('GET', url).url
724
+ json_data = {
725
+ 'security_hash': url.split('/')[-1],
726
+ 'intent': 'entire_transfer'
727
+ }
728
+ res = cget(
729
+ 'POST', f'https://wetransfer.com/api/v4/transfers/{url.split("/")[-2]}/download', json=json_data).json()
730
+ except Exception as e:
731
+ return (f'ERROR: {e.__class__.__name__}')
732
+ if "direct_link" in res:
733
+ return res["direct_link"]
734
+ elif "message" in res:
735
+ return (f"ERROR: {res['message']}")
736
+ elif "error" in res:
737
+ return (f"ERROR: {res['error']}")
738
+ else:
739
+ return ("ERROR: cannot find direct link")
740
+
741
+
742
+ def akmfiles(url):
743
+ cget = create_scraper().request
744
+ try:
745
+ url = cget('GET', url).url
746
+ json_data = {
747
+ 'op': 'download2',
748
+ 'id': url.split('/')[-1]
749
+ }
750
+ res = cget('POST', url, data=json_data)
751
+ except Exception as e:
752
+ return (f'ERROR: {e.__class__.__name__}')
753
+ html_tree = etree.HTML(res.content)
754
+ direct_link = html_tree.xpath("//a[contains(@class,'btn btn-dow')]/@href")
755
+ if direct_link:
756
+ return direct_link[0]
757
+ else:
758
+ return ('ERROR: Direct link not found')
759
+
760
+
761
+ def shrdsk(url):
762
+ cget = create_scraper().request
763
+ try:
764
+ url = cget('GET', url).url
765
+ res = cget(
766
+ 'GET', f'https://us-central1-affiliate2apk.cloudfunctions.net/get_data?shortid={url.split("/")[-1]}')
767
+ except Exception as e:
768
+ return (f'ERROR: {e.__class__.__name__}')
769
+ if res.status_code != 200:
770
+ return (
771
+ f'ERROR: Status Code {res.status_code}')
772
+ res = res.json()
773
+ if ("type" in res and res["type"].lower() == "upload" and "video_url" in res):
774
+ return res["video_url"]
775
+ return ("ERROR: cannot find direct link")
776
+
777
+
778
+ def linkbox(url):
779
+ cget = create_scraper().request
780
+ try:
781
+ url = cget('GET', url).url
782
+ res = cget(
783
+ 'GET', f'https://www.linkbox.to/api/file/detail?itemId={url.split("/")[-1]}').json()
784
+ except Exception as e:
785
+ return (f'ERROR: {e.__class__.__name__}')
786
+ if 'data' not in res:
787
+ return ('ERROR: Data not found!!')
788
+ data = res['data']
789
+ if not data:
790
+ return ('ERROR: Data is None!!')
791
+ if 'itemInfo' not in data:
792
+ return ('ERROR: itemInfo not found!!')
793
+ itemInfo = data['itemInfo']
794
+ if 'url' not in itemInfo:
795
+ return ('ERROR: url not found in itemInfo!!')
796
+ if "name" not in itemInfo:
797
+ return (
798
+ 'ERROR: Name not found in itemInfo!!')
799
+ name = quote(itemInfo["name"])
800
+ raw = itemInfo['url'].split("/", 3)[-1]
801
+ return f'https://wdl.nuplink.net/{raw}&filename={name}'
802
+
803
+
804
+ def zippyshare(url):
805
+ cget = create_scraper().request
806
+ try:
807
+ url = cget('GET', url).url
808
+ resp = cget('GET', url)
809
+ except Exception as e:
810
+ return (f'ERROR: {e.__class__.__name__}')
811
+ if not resp.ok:
812
+ return (
813
+ 'ERROR: Something went wrong!!, Try in your browser')
814
+ if findall(r'>File does not exist on this server<', resp.text):
815
+ return (
816
+ 'ERROR: File does not exist on server!!, Try in your browser')
817
+ pages = etree.HTML(resp.text).xpath(
818
+ "//script[contains(text(),'dlbutton')][3]/text()")
819
+ if not pages:
820
+ return ('ERROR: Page not found!!')
821
+ js_script = pages[0]
822
+ uri1 = None
823
+ uri2 = None
824
+ method = ''
825
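+ # The dlbutton script computes a numeric token (mtk) through obfuscated JS
+ # arithmetic; each branch below emulates one known variant of that script
+ # and recovers the two URI halves that surround the token.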
+ omg = findall(r"\.omg.=.(.*?);", js_script)
826
+ var_a = findall(r"var.a.=.(\d+)", js_script)
827
+ var_ab = findall(r"var.[ab].=.(\d+)", js_script)
828
+ unknown = findall(r"\+\((.*?).\+", js_script)
829
+ unknown1 = findall(r"\+.\((.*?)\).\+", js_script)
830
+ if omg:
831
+ omg = omg[0]
832
+ method = f'omg = {omg}'
833
+ mtk = (eval(omg) * (int(omg.split("%")[0]) % 3)) + 18
834
+ uri1 = findall(r'"/(d/\S+)/"', js_script)
835
+ uri2 = findall(r'\/d.*?\+"/(\S+)";', js_script)
836
+ elif var_a:
837
+ var_a = var_a[0]
838
+ method = f'var_a = {var_a}'
839
+ mtk = int(pow(int(var_a), 3) + 3)
840
+ uri1 = findall(r"\.href.=.\"/(.*?)/\"", js_script)
841
+ uri2 = findall(r"\+\"/(.*?)\"", js_script)
842
+ elif var_ab:
843
+ a = var_ab[0]
844
+ b = var_ab[1]
845
+ method = f'a = {a}, b = {b}'
846
+ mtk = eval(f"{floor(int(a)/3) + int(a) % int(b)}")
847
+ uri1 = findall(r"\.href.=.\"/(.*?)/\"", js_script)
848
+ uri2 = findall(r"\)\+\"/(.*?)\"", js_script)
849
+ elif unknown:
850
+ method = f'unknown = {unknown[0]}'
851
+ mtk = eval(f"{unknown[0]}+ 11")
852
+ uri1 = findall(r"\.href.=.\"/(.*?)/\"", js_script)
853
+ uri2 = findall(r"\)\+\"/(.*?)\"", js_script)
854
+ elif unknown1:
855
+ method = f'unknown1 = {unknown1[0]}'
856
+ mtk = eval(unknown1[0])
857
+ uri1 = findall(r"\.href.=.\"/(.*?)/\"", js_script)
858
+ uri2 = findall(r"\+.\"/(.*?)\"", js_script)
859
+ else:
860
+ return ("ERROR: Direct link not found")
861
+ if not any([uri1, uri2]):
862
+ return (
863
+ f"ERROR: uri1 or uri2 not found with method {method}")
864
+ domain = urlparse(url).hostname
865
+ return f"https://{domain}/{uri1[0]}/{mtk}/{uri2[0]}"
docker-compose.yml ADDED
@@ -0,0 +1,7 @@
1
+ version: "3.8"
2
+ services:
3
+ worker:
4
+ build: .
5
+
6
+
7
+
log.txt ADDED
@@ -0,0 +1,3 @@
1
+ 26-Nov-23 08:29:34 AM - root - ERROR - Something went wrong while updating, check UPSTREAM_REPO if valid or not! [update.py:37]
2
+ 26-Nov-23 08:34:22 AM - root - ERROR - Something went wrong while updating, check UPSTREAM_REPO if valid or not! [update.py:37]
3
+ 26-Nov-23 08:34:57 AM - root - ERROR - Something went wrong while updating, check UPSTREAM_REPO if valid or not! [update.py:37]
main.py ADDED
@@ -0,0 +1,681 @@
1
+ #7Z@QVcA^X*jV
2
+ #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
3
+ import pyrogram
4
+ from pyrogram import Client
5
+ from pyrogram import filters, enums
6
+ from pyrogram.types import InlineKeyboardMarkup,InlineKeyboardButton
7
+ from pyrogram.errors import UserNotParticipant
8
+ from pymongo import MongoClient
9
+ import bypasser
10
+ import os
11
+ import ddl
12
+ from pyrogram.enums.parse_mode import ParseMode
13
+ import requests
14
+ import threading
15
+ from texts import HELP_TEXT,RESTART_TXT,START_TEXT,SHORT_TEXT,ABOUT_TEXT
16
+ from ddl import ddllist
17
+ import re
18
+ from broadcast_helper import broadcast_messages
19
+ import pytz
20
+ from datetime import date, timedelta
21
+ import time
22
+ import asyncio
23
+ from database import Database
24
+ from scraper import scrape
25
+ import io
26
+ import sys
27
+ import traceback
28
+ import subprocess
29
+ from asyncio import create_subprocess_exec, create_subprocess_shell, run_coroutine_threadsafe, sleep
30
+ from asyncio.subprocess import PIPE
31
+ from io import BytesIO
32
+ from time import time
33
+ from re import match
34
+ from sys import executable, argv
35
+ from os import execl,path as ospath
36
+ from asyncio import create_task, gather
37
+
38
+
39
+
40
+
41
+ # bot
42
+ bot_token = os.environ.get("TOKEN", "6394626120:AAHVtg8PoSU_SKY6NY8C2HYXYdH0xm_wOks")
43
+ #bot_token = os.environ.get("TOKEN", "5981576988:AAEicuu56o0wk3sVTtSauMN4wX6QYd2HMig")
44
+ api_hash = os.environ.get("HASH", "8b446e569ff634428df4ad723d01b7fd")
45
+ api_id = os.environ.get("ID", "25094651")
46
+ OWNER_ID = os.environ.get("OWNER_ID", "6131675384")
47
+ ADMIN_LIST = [int(ch) for ch in (os.environ.get("ADMIN_LIST", f"{OWNER_ID} 661054276")).split()]
48
+ OWNER_USERNAME = os.environ.get("OWNER_USERNAME", "MrPrinceSanji04")
49
+ PERMANENT_GROUP = os.environ.get("PERMANENT_GROUP", "-999549719")
50
+ GROUP_ID = [int(ch) for ch in (os.environ.get("GROUP_ID", f"{PERMANENT_GROUP}")).split()]
51
+ UPDATES_CHANNEL = str(os.environ.get("UPDATES_CHANNEL", None))
52
+ LOG_CHANNEL = int(os.environ.get('LOG_CHANNEL', -1001975074655))
53
+ name = str(os.environ.get('name', 'Bypass'))
54
+ JACK_ID='661054276'
55
+ db_url = os.environ.get("DATABASE_URL", "mongodb+srv://herukotest:herukotest@test.trmvd8p.mongodb.net/?retryWrites=true&w=majority")
56
+ #db_url = os.environ.get("DATABASE_URL", "mongodb+srv://testfiletolink:testfiletolink@file.k0gf5py.mongodb.net/?retryWrites=true&w=majority")
57
+
58
+ app = Client("my_bot",api_id=api_id, api_hash=api_hash,bot_token=bot_token)
59
+
60
+ # db setup
61
+ m_client = MongoClient(db_url)
62
+ db = m_client['bypass12']
63
+ collection = db['users421']
64
+
65
+ if collection.find_one({"role":"admin"}):
66
+ pass
67
+ else:
68
+ document = {"role":"admin","value":ADMIN_LIST}
69
+ collection.insert_one(document)
70
+
71
+ if collection.find_one({"role":"auth_chat"}):
72
+ pass
73
+ else:
74
+ document = {"role":"auth_chat","value":GROUP_ID}
75
+ collection.insert_one(document)
76
+ ##############################################################################################################################
77
+ START_BUTTONS = InlineKeyboardMarkup(
78
+ [[
79
+ InlineKeyboardButton('× ✨Jᴏɪɴ Oᴜʀ Mᴀɪɴ Cʜᴀɴɴᴇʟ✨ ×', url=f'https://t.me/MrPrinceBotz'),
80
+ InlineKeyboardButton('× ✨sᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ✨ ×', url=f'https://t.me/MrPrinceSupport'),
81
+ ],
82
+ [
83
+ InlineKeyboardButton('ғᴇᴀᴛᴜʀᴇs', callback_data='shr'),
84
+ ],
85
+ [
86
+ InlineKeyboardButton('Aʙᴏᴜᴛ', callback_data='about'),
87
+ InlineKeyboardButton('Cʟᴏsᴇ', callback_data='close')
88
+ ]]
89
+ )
90
+ # HELP_BUTTONS = InlineKeyboardMarkup(
91
+ # [[
92
+ # InlineKeyboardButton('Hᴏᴍᴇ', callback_data='home'),
93
+ # InlineKeyboardButton('Aʙᴏᴜᴛ', callback_data='about'),
94
+ # InlineKeyboardButton('Cʟᴏsᴇ', callback_data='close')
95
+ # ]]
96
+ # )
97
+ ABOUT_BUTTONS = InlineKeyboardMarkup(
98
+ [[
99
+ InlineKeyboardButton('Hᴏᴍᴇ', callback_data='home'),
100
+ InlineKeyboardButton('Cʟᴏsᴇ', callback_data='close')
101
+ ]]
102
+ )
103
+
104
+ @app.on_callback_query()
105
+ async def cb_data(bot, update):
106
+ if update.data == "home":
107
+ await update.message.edit_text(
108
+ text=START_TEXT.format(update.from_user.mention),
109
+ disable_web_page_preview=True,
110
+ reply_markup=START_BUTTONS
111
+ )
112
+ elif update.data == "shr":
113
+ await update.message.edit_text(
114
+ text=SHORT_TEXT,
115
+ disable_web_page_preview=True,
116
+ reply_markup=ABOUT_BUTTONS
117
+ )
118
+ elif update.data == "about":
119
+ await update.message.edit_text(
120
+ text=ABOUT_TEXT,
121
+ disable_web_page_preview=True,
122
+ reply_markup=ABOUT_BUTTONS
123
+ )
124
+
125
+ else:
126
+ await update.message.delete()
127
+
128
+
129
+ # handle index
130
+ def handleIndex(ele,message,msg):
131
+ result = bypasser.scrapeIndex(ele)
132
+ try: app.delete_messages(message.chat.id, msg.id)
133
+ except: pass
134
+ for page in result: app.send_message(message.chat.id, page, reply_to_message_id=message.id, disable_web_page_preview=True)
135
+
136
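+ # Renders a duration as stacked units, e.g. convert_time(93784) -> '1d2h3m4s'
+ # and convert_time(0.5) -> '500ms'.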
+ def convert_time(seconds):
137
+ mseconds = seconds * 1000
138
+ periods = [('d', 86400000), ('h', 3600000), ('m', 60000), ('s', 1000), ('ms', 1)]
139
+ result = ''
140
+ for period_name, period_seconds in periods:
141
+ if mseconds >= period_seconds:
142
+ period_value, mseconds = divmod(mseconds, period_seconds)
143
+ result += f'{int(period_value)}{period_name}'
144
+ if result == '':
145
+ return '0ms'
146
+ return result
147
+
148
+ def is_share_link(url):
149
+ return bool(re.search(r'pixeldrain\.com|gofile\.io|toonshub\.xyz|toonshub\.link|www\.toonshub\.link|www\.toonshub\.xyz|toonworld4all\.me|www\.instagram|youtu|www\.youtu|www\.youtube|indexlink|d\.terabox|mega\.nz|t\.me|telegram|workers\.dev', url))
150
+ # loop thread
151
+ def loopthread(message):
152
+
153
+
154
+ urls = []
155
+ for ele in message.text.split():
156
+ if "http://" in ele or "https://" in ele:
157
+ urls.append(ele)
158
+ if len(urls) == 0: return
159
+ if bypasser.ispresent(ddllist,urls[0]):
160
+ msg = app.send_message(message.chat.id, "⚡ __generating...__", reply_to_message_id=message.id)
161
+ else:
162
+ if urls[0] in "https://olamovies" or urls[0] in "https://psa.wf/":
163
+ msg = app.send_message(message.chat.id, "🔎 __this might take some time...__", reply_to_message_id=message.id)
164
+ else:
165
+ msg = app.send_message(message.chat.id, f"🔎 __bypassing...__ {urls[0]}", reply_to_message_id=message.id)
166
+
167
+ link = ""
168
+ fails=''
169
+ print(f'urls :{urls}')
170
+ start=time()
171
+ for ele in urls:
172
+ while True:
173
+ temp=f'┏ <b>sᴏᴜʀᴄᴇ ʟɪɴᴋ :</b> {ele} \n┗ <b>ʙʏᴘᴀss ʟɪɴᴋ :</b> '
174
+ if re.search(r"https?:\/\/(?:[\w.-]+)?\.\w+\/\d+:", ele):
175
+ handleIndex(ele,message,msg)
176
+ return
177
+ elif bypasser.ispresent(ddllist,ele):
178
+ print('entered ddl')
179
+ try: tem = asyncio.run(ddl.direct_link_generator(ele))
180
+ except Exception as e: tem = "**Error**: " + str(e)
181
+ else:
182
+ print('entered bypass')
183
+ try: tem = asyncio.run(bypasser.shortners(ele))
184
+ except Exception as e: tem = "**Error**: " + str(e)
185
+ print("bypassed:",tem)
186
+ temp+=tem
187
+ link = link + temp + "\n\n"
188
+ if is_share_link(tem) or 'Not in Supported Sites' in tem or 'https://' not in tem:
189
+ break
190
+ ele=tem
191
+ link = link +"━━━━━━━━━━━❅━━━━━━━━━━━\n\n"
192
+ end=time()
193
+ timetaken=convert_time(end-start)
194
+ print(timetaken)
195
+ reqstr = app.get_users(message.from_user.id)
196
+ v=f'Rᴇᴏ̨ᴜᴇsᴛᴇᴅ Bʏ: {reqstr.mention}\nᴛᴏᴛᴀʟ ʟɪɴᴋs: {len(urls)}\nᴛɪᴍᴇ ᴛᴀᴋᴇɴ: <code>{timetaken}</code>'
197
+ try:
198
+ app.edit_message_text(message.chat.id, msg.id,f'<b>❅━━━「 <a href="https://t.me/+hkU5gzW29BMzNmZl">MʀPʀɪɴᴄᴇ Bᴏᴛᴢ</a> 」━━━❅</b>\n\n{link}\n<b>{v}\n🤩 Pᴏᴡᴇʀᴇᴅ ʙʏ @MrPrinceBotz</b>', disable_web_page_preview=True,
199
+ reply_markup=InlineKeyboardMarkup(
200
+ [[InlineKeyboardButton(" Uᴘᴅᴀᴛᴇ Cʜᴀɴɴᴇʟ ✴", url=f"https://t.me/+hkU5gzW29BMzNmZl")]]
201
+ )
202
+ )
203
+ app.send_message(chat_id=LOG_CHANNEL, text=f"Log of {message.chat.title}:\n\nName : {reqstr.mention}\n\nID : {message.from_user.id}\n\nBypassed Links: {link}\n\n{fails}", disable_web_page_preview=True)
204
+ except:
205
+ app.send_message(message.chat.id, "Failed to Bypass")
206
+
207
+ @app.on_message(filters.command(["search"]))
208
+ def send_start(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
209
+ msg=message.text
210
+ if '@MrPrince_Link_Bypass_bot' in msg:
211
+ msg=msg.split('/search@MrPrince_Link_Bypass_bot ')[-1]
212
+ else:
213
+ msg=msg.split('/search ')[-1]
214
+ app.send_message(message.chat.id,scrape(msg),disable_web_page_preview=True)
215
+ @app.on_message(filters.command('restart'))
216
+ async def restart(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
217
+ print('restarting')
218
+ restart_message = await message.reply('<i>Restarting...</i>')
219
+ await (await create_subprocess_exec('python3', 'update.py')).wait()
220
+ await cmd_exec('rm -rf my_bot.session __pycache__/', shell=True)
221
+ execl(executable, executable,"main.py")
222
+
223
+
224
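+ # Wraps arbitrary user code in a one-off async function: every line is
+ # re-indented under 'async def __aexec', exec'd, then awaited with the live
+ # client and message in scope.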
+ async def aexec(code, client, message):
225
+ exec(
226
+ "async def __aexec(client, message): "
227
+ + "".join(f"\n {l_}" for l_ in code.split("\n"))
228
+ )
229
+ return await locals()["__aexec"](client, message)
230
+
231
+
232
+ @app.on_message(filters.command('eval', prefixes='!'))
233
+ #@app.on_message(filters.command(["eval"]))
234
+ async def send_start(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
235
+ status_message = await message.reply_text("Processing ...")
236
+ cmd = message.text.split(" ", maxsplit=1)[1]
237
+ reply_to_ = message
238
+ if message.reply_to_message:
239
+ reply_to_ = message.reply_to_message
240
+ old_stderr = sys.stderr
241
+ old_stdout = sys.stdout
242
+ redirected_output = sys.stdout = io.StringIO()
243
+ redirected_error = sys.stderr = io.StringIO()
244
+ stdout, stderr, exc = None, None, None
245
+ try:
246
+ await aexec(cmd, client, message)
247
+ except Exception:
248
+ exc = traceback.format_exc()
249
+ stdout = redirected_output.getvalue()
250
+ stderr = redirected_error.getvalue()
251
+ sys.stdout = old_stdout
252
+ sys.stderr = old_stderr
253
+ evaluation = ""
254
+ if exc:
255
+ evaluation = exc
256
+ elif stderr:
257
+ evaluation = stderr
258
+ elif stdout:
259
+ evaluation = stdout
260
+ else:
261
+ evaluation = "Success"
262
+ final_output = "<b>EVAL</b>: "
263
+ final_output += f"<code>{cmd}</code>\n\n"
264
+ final_output += "<b>OUTPUT</b>:\n"
265
+ final_output += f"<code>{evaluation.strip()}</code> \n"
266
+ if len(final_output) > 4096:
267
+ with io.BytesIO(str.encode(final_output)) as out_file:
268
+ out_file.name = "eval.txt"
269
+ await reply_to_.reply_document(
270
+ document=out_file, caption=cmd, disable_notification=True
271
+ )
272
+ else:
273
+ await reply_to_.reply_text(final_output)
274
+ await status_message.delete()
275
+
276
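+ # Async subprocess helper: runs cmd via exec (argv list) or a shell (string)
+ # and returns (stdout, stderr, returncode) with both streams decoded.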
+ async def cmd_exec(cmd, shell=False):
277
+ if shell:
278
+ proc = await create_subprocess_shell(cmd, stdout=PIPE, stderr=PIPE)
279
+ else:
280
+ proc = await create_subprocess_exec(*cmd, stdout=PIPE, stderr=PIPE)
281
+ stdout, stderr = await proc.communicate()
282
+ stdout = stdout.decode().strip()
283
+ stderr = stderr.decode().strip()
284
+ return stdout, stderr, proc.returncode
285
+
286
+ @app.on_message(filters.command('sh', prefixes='!'))
287
+ async def shell(_, message):
288
+ cmd = message.text.split(maxsplit=1)
289
+ if len(cmd) == 1:
290
+ await app.send_message(message.chat.id, 'No command to execute was given.')
291
+ return
292
+ cmd = cmd[1]
293
+ # Check if it's an environment variable setting command
294
+ if cmd.startswith('export '):
295
+ parts = cmd.split(' ', 1)
296
+ if len(parts) == 2:
297
+ var, value = parts[1].split('=', 1)
298
+ os.environ[var] = value
299
+ await app.send_message(message.chat.id, f"Set environment variable {var} to {value}")
300
+ else:
301
+ await app.send_message(message.chat.id, 'Invalid command to set environment variable.')
302
+ else:
303
+ stdout, stderr, _ = await cmd_exec(cmd, shell=True)
304
+ reply = ''
305
+ if len(stdout) != 0:
306
+ reply += f"*Stdout*\n{stdout}\n"
307
+ #LOGGER.info(f"Shell - {cmd} - {stdout}")
308
+ print(f"Shell - {cmd} - {stdout}")
309
+ if len(stderr) != 0:
310
+ reply += f"*Stderr*\n{stderr}"
311
+ #LOGGER.error(f"Shell - {cmd} - {stderr}")
312
+ print(f"Shell - {cmd} - {stderr}")
313
+ if len(reply) > 3000:
314
+ with BytesIO(str.encode(reply)) as out_file:
315
+ out_file.name = "shell_output.txt"
316
+ await app.send_document(message.chat.id, out_file)
317
+ elif len(reply) != 0:
318
+ await app.send_message(message.chat.id, reply)
319
+ else:
320
+ await app.send_message(message.chat.id, 'No Reply')
321
+
322
+
323
+ # start command
324
+ @app.on_message(filters.command(["start"]))
325
+ async def send_start(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
326
+ result = collection.find_one({"role":"auth_chat"})
327
+ GROUP_ID = result["value"]
328
+ if not await db.is_user_exist(message.from_user.id):
329
+ await db.add_user(message.from_user.id)
330
+ await client.send_message(
331
+ LOG_CHANNEL,
332
+ f"**Nᴇᴡ Usᴇʀ Jᴏɪɴᴇᴅ:** \n\n__Mʏ Nᴇᴡ Fʀɪᴇɴᴅ__ [{message.from_user.first_name}](tg://user?id={message.from_user.id}) __Sᴛᴀʀᴛᴇᴅ Yᴏᴜʀ Bᴏᴛ !!__"
333
+ )
334
+ if str(message.chat.id).startswith("-100") and message.chat.id not in GROUP_ID:
335
+ return
336
+ elif message.chat.id not in GROUP_ID:
337
+ if UPDATES_CHANNEL != "None":
338
+ try:
339
+ user = await app.get_chat_member(UPDATES_CHANNEL, message.chat.id)
340
+ if user.status == enums.ChatMemberStatus.BANNED:
341
+ await app.send_message(
342
+ chat_id=message.chat.id,
343
+ text=f"__Sorry, you are banned. Contact My [ Owner ](https://telegram.me/{OWNER_USERNAME})__",
344
+ disable_web_page_preview=True
345
+ )
346
+ return
347
+ except UserNotParticipant:
348
+ await app.send_message(
349
+ chat_id=message.chat.id,
350
+ text="<i>🔐 Join Channel To Use Me 🔐</i>",
351
+ reply_markup=InlineKeyboardMarkup(
352
+ [
353
+ [
354
+ InlineKeyboardButton("🔓 Join Now 🔓", url=f"https://t.me/{UPDATES_CHANNEL}")
355
+ ]
356
+ ]
357
+ ),
358
+
359
+ )
360
+ return
361
+ except Exception:
362
+ await app.send_message(
363
+ chat_id=message.chat.id,
364
+ text=f"<i>Something went wrong</i> <b> <a href='https://telegram.me/{OWNER_USERNAME}'>CLICK HERE FOR SUPPORT </a></b>",
365
+
366
+ disable_web_page_preview=True)
367
+ return
368
+ await message.reply_photo(
369
+ photo='https://telegra.ph/file/84831ad030f1f607b2232.png',
370
+ caption=START_TEXT.format(message.from_user.mention),
371
+ reply_markup=START_BUTTONS,
372
+ parse_mode=enums.ParseMode.HTML
373
+ )
374
+
375
+ # help command
376
+ @app.on_message(filters.command(["help"]))
377
+ async def send_help(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
378
+ result = collection.find_one({"role":"auth_chat"})
379
+ GROUP_ID = result["value"]
380
+ if not await db.is_user_exist(message.from_user.id):
381
+ await db.add_user(message.from_user.id)
382
+ await client.send_message(
383
+ LOG_CHANNEL,
384
+ f"**Nᴇᴡ Usᴇʀ Jᴏɪɴᴇᴅ:** \n\n__Mʏ Nᴇᴡ Fʀɪᴇɴᴅ__ [{message.from_user.first_name}](tg://user?id={message.from_user.id}) __Sᴛᴀʀᴛᴇᴅ Yᴏᴜʀ Bᴏᴛ !!__"
385
+ )
386
+ if str(message.chat.id).startswith("-100") and message.chat.id not in GROUP_ID:
387
+ return
388
+ elif message.chat.id not in GROUP_ID:
389
+ if UPDATES_CHANNEL != "None":
390
+ try:
391
+ user = await app.get_chat_member(UPDATES_CHANNEL, message.chat.id)
392
+ if user.status == enums.ChatMemberStatus.BANNED:
393
+ await app.send_message(
394
+ chat_id=message.chat.id,
395
+ text=f"__Sorry, you are banned. Contact My [ Owner ](https://telegram.me/{OWNER_USERNAME})__",
396
+ disable_web_page_preview=True
397
+ )
398
+ return
399
+ except UserNotParticipant:
400
+ await app.send_message(
401
+ chat_id=message.chat.id,
402
+ text="<i>🔐 Join Channel To Use Me 🔐</i>",
403
+ reply_markup=InlineKeyboardMarkup(
404
+ [
405
+ [
406
+ InlineKeyboardButton("🔓 Join Now 🔓", url=f"https://t.me/{UPDATES_CHANNEL}")
407
+ ]
408
+ ]
409
+ ),
410
+
411
+ )
412
+ return
413
+ except Exception:
414
+ await app.send_message(
415
+ chat_id=message.chat.id,
416
+ text=f"<i>Something went wrong</i> <b> <a href='https://telegram.me/{OWNER_USERNAME}'>CLICK HERE FOR SUPPORT </a></b>",
417
+
418
+ disable_web_page_preview=True)
419
+ return
420
+ await app.send_message(message.chat.id, HELP_TEXT, reply_to_message_id=message.id, disable_web_page_preview=True)
421
+
422
+ @app.on_message(filters.command(["authorize"]))
423
+ async def send_help(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
424
+
425
+ result = collection.find_one({"role":"admin"})
426
+ ADMIN_LIST = result["value"]
427
+ result = collection.find_one({"role":"auth_chat"})
428
+ GROUP_ID = result["value"]
429
+ if message.chat.id in ADMIN_LIST or message.from_user.id in ADMIN_LIST :
430
+ try :
431
+ msg = int(message.text.split()[-1])
432
+ except ValueError:
433
+ await app.send_message(message.chat.id, f"Example\n<code>/authorize -100</code>", reply_to_message_id=message.id, disable_web_page_preview=True)
434
+ return
435
+ if msg in GROUP_ID:
436
+ await app.send_message(message.chat.id, f"Already Added", reply_to_message_id=message.id, disable_web_page_preview=True)
437
+ else :
438
+ GROUP_ID.append(msg)
439
+ collection.update_one({"role":"auth_chat"}, {"$set": {"value":GROUP_ID}}, upsert=True)
440
+ await app.send_message(message.chat.id, f"Authorized Sucessfully!", reply_to_message_id=message.id, disable_web_page_preview=True)
441
+ else:
442
+ await app.send_message(message.chat.id, f"This Command Is Only For Admins", reply_to_message_id=message.id, disable_web_page_preview=True)
443
+
444
+ @app.on_message(filters.command("unauthorize"))
445
+ async def send_help(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
446
+ result = collection.find_one({"role":"admin"})
447
+
448
+ ADMIN_LIST = result["value"]
449
+ result = collection.find_one({"role":"auth_chat"})
450
+ GROUP_ID = result["value"]
451
+ if message.chat.id in ADMIN_LIST or message.from_user.id in ADMIN_LIST :
452
+ try :
453
+ msg = int(message.text.split()[-1])
454
+ except ValueError:
455
+ await app.send_message(message.chat.id, f"Example\n<code>/unauthorize -100</code>", reply_to_message_id=message.id, disable_web_page_preview=True)
456
+ return
457
+ if msg not in GROUP_ID:
458
+ await app.send_message(message.chat.id, f"Already Removed", reply_to_message_id=message.id, disable_web_page_preview=True)
459
+ else :
460
+ if msg == int(PERMANENT_GROUP) :
461
+ await app.send_message(message.chat.id, f"Even Owner Can't Remove This {msg} Chat 😂😂", reply_to_message_id=message.id, disable_web_page_preview=True)
462
+ return
463
+ GROUP_ID.remove(msg)
464
+ collection.update_one({"role":"auth_chat"}, {"$set": {"value":GROUP_ID}}, upsert=True)
465
+ await app.send_message(message.chat.id, f"Unauthorized!", reply_to_message_id=message.id, disable_web_page_preview=True)
466
+ else:
467
+ await app.send_message(message.chat.id, f"This Command Is Only For Admins", reply_to_message_id=message.id, disable_web_page_preview=True)
468
+
469
+ @app.on_message(filters.command(["addsudo"]))
470
+ async def send_help(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
471
+ result = collection.find_one({"role":"admin"})
472
+
473
+ ADMIN_LIST = result["value"]
474
+ if message.chat.id == int(OWNER_ID) or message.from_user.id == int(OWNER_ID) :
475
+ try :
476
+ msg = int(message.text.split()[-1])
477
+ except ValueError:
478
+ await app.send_message(message.chat.id, f"Example\n<code>/addsudo 123</code>", reply_to_message_id=message.id, disable_web_page_preview=True)
479
+ return
480
+ if msg in ADMIN_LIST:
481
+ await app.send_message(message.chat.id, f"Already Admin", reply_to_message_id=message.id, disable_web_page_preview=True)
482
+ else :
483
+ ADMIN_LIST.append(msg)
484
+ collection.update_one({"role":"admin"}, {"$set": {"value":ADMIN_LIST}}, upsert=True)
485
+ await app.send_message(message.chat.id, f"Promoted As Admin", reply_to_message_id=message.id, disable_web_page_preview=True)
486
+ else:
487
+ await app.send_message(message.chat.id, f"This Command Is Only For Owner", reply_to_message_id=message.id, disable_web_page_preview=True)
488
+
489
+ @app.on_message(filters.command(["remsudo"]))
490
+ async def send_help(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
491
+ result = collection.find_one({"role":"admin"})
492
+
493
+ ADMIN_LIST = result["value"]
494
+ if message.chat.id == int(OWNER_ID) or message.from_user.id == int(OWNER_ID) :
495
+ try :
496
+ msg = int(message.text.split()[-1])
497
+ except ValueError:
498
+ await app.send_message(message.chat.id, f"Example\n<code>/remsudo 123</code>", reply_to_message_id=message.id, disable_web_page_preview=True)
499
+ return
500
+ if msg not in ADMIN_LIST:
501
+ await app.send_message(message.chat.id, f"Already Demoted!", reply_to_message_id=message.id, disable_web_page_preview=True)
502
+ else :
503
+ if msg == int(message.from_user.id) :
504
+ await app.send_message(message.chat.id, f"You Can't Remove Yourself 😂😂", reply_to_message_id=message.id, disable_web_page_preview=True)
505
+ return
506
+ elif msg == int(OWNER_ID) :
507
+ await app.send_message(message.chat.id, f"Even Owner Can't Remove Himself 😂😂", reply_to_message_id=message.id, disable_web_page_preview=True)
508
+ return
509
+ ADMIN_LIST.remove(msg)
510
+ collection.update_one({"role":"admin"}, {"$set": {"value":ADMIN_LIST}}, upsert=True)
511
+ await app.send_message(message.chat.id, f"Demoted!", reply_to_message_id=message.id, disable_web_page_preview=True)
512
+ else:
513
+ await app.send_message(message.chat.id, f"This Command Is Only For Owner", reply_to_message_id=message.id, disable_web_page_preview=True)
514
+
515
+ @app.on_message(filters.command(["users"]))
516
+ async def send_help(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
517
+ result = collection.find_one({"role":"admin"})
518
+
519
+ ADMIN_LIST = result["value"]
520
+ result = collection.find_one({"role":"auth_chat"})
521
+ GROUP_ID = result["value"]
522
+ if message.chat.id in ADMIN_LIST or message.from_user.id in ADMIN_LIST :
523
+ lol = "List Of Authorized Chats\n\n"
524
+ for i in GROUP_ID:
525
+ lol += "<code>" + str(i) + "</code>\n"
526
+ lol += "\nList Of Admin ID's\n\n"
527
+ for i in ADMIN_LIST:
528
+ lol += "<code>" + str(i) + "</code>\n"
529
+ await app.send_message(message.chat.id, lol, reply_to_message_id=message.id, disable_web_page_preview=True)
530
+ else :
531
+ await app.send_message(message.chat.id, f"This Command Is Only For Admins", reply_to_message_id=message.id, disable_web_page_preview=True)
532
+
533
+ ###############
534
+ db = Database(db_url, name)
535
+ ADMINS='661054276'
536
+ @app.on_message(filters.command(["broadcast123"]))
537
+ async def send_help(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
538
+ print('hi')
539
+ db = Database(db_url, name)
540
+ users = await db.get_all_users()
541
+ b_msg = message.reply_to_message
542
+ sts = await message.reply_text(
543
+ text='Broadcasting your messages...'
544
+ )
545
+ start_time = time()
546
+ total_users = await db.total_users_count()
547
+ done = 0
548
+ blocked = 0
549
+ deleted = 0
550
+ failed =0
551
+
552
+ success = 0
553
+ async for user in users:
554
+ pti, sh = await broadcast_messages(int(user['id']), b_msg)
555
+ if pti:
556
+ success += 1
557
+ elif pti is False:
558
+ if sh == "Blocked":
559
+ blocked+=1
560
+ elif sh == "Deleted":
561
+ deleted += 1
562
+ elif sh == "Error":
563
+ failed += 1
564
+ done += 1
565
+ await asyncio.sleep(2)
566
+ if not done % 20:
567
+ await sts.edit(f"Broadcast in progress:\n\nTotal Users {total_users}\nCompleted: {done} / {total_users}\nSuccess: {success}\nBlocked: {blocked}\nDeleted: {deleted}")
568
+ time_taken = timedelta(seconds=int(time()-start_time))
569
+ await sts.edit(f"Broadcast Completed:\nCompleted in {time_taken} seconds.\n\nTotal Users {total_users}\nCompleted: {done} / {total_users}\nSuccess: {success}\nBlocked: {blocked}\nDeleted: {deleted}")
570
+
571
+ ############
572
+
573
+ # links
574
+ @app.on_message(filters.text)
575
+ async def receive(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
576
+ result = collection.find_one({"role":"auth_chat"})
577
+ GROUP_ID = result["value"]
578
+ if not await db.is_user_exist(message.from_user.id):
579
+ await db.add_user(message.from_user.id)
580
+ await client.send_message(
581
+ LOG_CHANNEL,
582
+ f"**Nᴇᴡ Usᴇʀ Jᴏɪɴᴇᴅ:** \n\n__Mʏ Nᴇᴡ Fʀɪᴇɴᴅ__ [{message.from_user.first_name}](tg://user?id={message.from_user.id}) __Sᴛᴀʀᴛᴇᴅ Yᴏᴜʀ Bᴏᴛ !!__"
583
+ )
584
+ if str(message.chat.id).startswith("-100") and message.chat.id not in GROUP_ID:
585
+ return
586
+ elif message.chat.id not in GROUP_ID:
587
+ if UPDATES_CHANNEL != "None":
588
+ try:
589
+ user = await app.get_chat_member(UPDATES_CHANNEL, message.chat.id)
590
+ if user.status == enums.ChatMemberStatus.BANNED:
591
+ await app.send_message(
592
+ chat_id=message.chat.id,
593
+ text=f"__Sorry, you are banned. Contact My [ Owner ](https://telegram.me/{OWNER_USERNAME})__",
594
+ disable_web_page_preview=True
595
+ )
596
+ return
597
+ except UserNotParticipant:
598
+ await app.send_message(
599
+ chat_id=message.chat.id,
600
+ text="<i>🔐 Join Channel To Use Me 🔐</i>",
601
+ reply_markup=InlineKeyboardMarkup(
602
+ [
603
+ [
604
+ InlineKeyboardButton("🔓 Join Now 🔓", url=f"https://t.me/{UPDATES_CHANNEL}")
605
+ ]
606
+ ]
607
+ ),
608
+
609
+ )
610
+ return
611
+ except Exception:
612
+ await app.send_message(
613
+ chat_id=message.chat.id,
614
+ text=f"<i>Something went wrong</i> <b> <a href='https://telegram.me/{OWNER_USERNAME}'>CLICK HERE FOR SUPPORT </a></b>",
615
+
616
+ disable_web_page_preview=True)
617
+ return
618
+ bypass = threading.Thread(target=lambda:loopthread(message),daemon=True)
619
+ bypass.start()
620
+
621
+
622
+
623
+ # doc thread
624
+ def docthread(message):
625
+ if message.document.file_name.endswith("dlc"):
626
+ msg = app.send_message(message.chat.id, "🔎 __bypassing...__", reply_to_message_id=message.id)
627
+ print("sent DLC file")
628
+ sess = requests.session()
629
+ file = app.download_media(message)
630
+ dlccont = open(file,"r").read()
631
+ link = bypasser.getlinks(dlccont,sess)
632
+ app.edit_message_text(message.chat.id, msg.id, f'__{link}__')
633
+ os.remove(file)
634
+
635
+ @app.on_message(filters.document)
636
+ async def docfile(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
637
+ result = collection.find_one({"role":"auth_chat"})
638
+ GROUP_ID = result["value"]
639
+ if str(message.chat.id).startswith("-100") and message.chat.id not in GROUP_ID:
640
+ return
641
+ elif message.chat.id not in GROUP_ID:
642
+ if UPDATES_CHANNEL != "None":
643
+ try:
644
+ user = await app.get_chat_member(UPDATES_CHANNEL, message.chat.id)
645
+ if user.status == enums.ChatMemberStatus.BANNED:
646
+ await app.send_message(
647
+ chat_id=message.chat.id,
648
+ text=f"__Sorry, you are banned. Contact My [ Owner ](https://telegram.me/{OWNER_USERNAME})__",
649
+ disable_web_page_preview=True
650
+ )
651
+ return
652
+ except UserNotParticipant:
653
+ await app.send_message(
654
+ chat_id=message.chat.id,
655
+ text="<i>🔐 Join Channel To Use Me 🔐</i>",
656
+ reply_markup=InlineKeyboardMarkup(
657
+ [
658
+ [
659
+ InlineKeyboardButton("🔓 Join Now 🔓", url=f"https://t.me/{UPDATES_CHANNEL}")
660
+ ]
661
+ ]
662
+ ),
663
+
664
+ )
665
+ return
666
+ except Exception:
667
+ await app.send_message(
668
+ chat_id=message.chat.id,
669
+ text=f"<i>Something went wrong</i> <b> <a href='https://telegram.me/{OWNER_USERNAME}'>CLICK HERE FOR SUPPORT </a></b>",
670
+
671
+ disable_web_page_preview=True)
672
+ return
673
+ if message.document.file_name.endswith(".dlc"):
674
+ bypass = threading.Thread(target=lambda:docthread(message),daemon=True)
675
+ bypass.start()
676
+
677
+
678
+
679
+ # server loop
680
+ print("Bot Starting")
681
+ app.run()
requirements.txt ADDED
@@ -0,0 +1,22 @@
1
+ requests
2
+ cloudscraper
3
+ bs4
4
+ python-dotenv
5
+ pyrogram==2.0.66
6
+ pymongo
7
+ tgcrypto
8
+ lxml
9
+ lk21
10
+ cfscrape
11
+ urllib3==1.26
12
+ Flask
13
+ gunicorn==20.1.0
14
+ aiohttp==3.8.1
15
+ flask_restful
16
+ pytz
17
+ motor
18
+ asyncio
19
+ curl-cffi
20
+ js2py
21
+ chromedriver_autoinstaller
22
+ selenium
runtime.txt ADDED
@@ -0,0 +1 @@
1
+ python-3.9.14
scraper.py ADDED
@@ -0,0 +1,62 @@
1
+ # import re
2
+ # from re import match as rematch, findall, sub as resub, compile as recompile
3
+ import requests
4
+ # from requests import get as rget
5
+ # import base64
6
+ # from urllib.parse import unquote, urlparse, parse_qs, quote
7
+ # import time
8
+ import cloudscraper
9
+ from bs4 import BeautifulSoup, NavigableString, Tag
10
+ # from lxml import etree
11
+ # import hashlib
12
+ # import json
13
+ # from dotenv import load_dotenv
14
+ # load_dotenv()
15
+ # from asyncio import sleep as asleep
16
+ # import os
17
+ # import ddl
18
+ # from cfscrape import create_scraper
19
+ # from uuid import uuid4
20
+ # from requests import session
21
+ # from ddl import humanbytes
22
+
23
+
24
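+ # The trailing flag picks the site, e.g. scrape("Moviename -10bit") searches
+ # 10bitclub; -tmb, -cine and -atishmkv hit the other mirrors.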
+ def scrape(url):
25
+ if '-' not in url: return 'Use with parameters -10bit -tmb -cine -atishmkv\n/search Moviename -10bit'
26
+ u=url.split('-')
27
+ if u[-1]=='10bit':
28
+ resp=requests.get(f'https://10bitclub.me/?s={u[0]}')
29
+ soup = BeautifulSoup(resp.content, "html.parser")
30
+ l=f'Search Result for {u[0]}\n'
31
+ for i in soup.find_all('div',{'class':'title'}):
32
+ li=i.a
33
+ l+=f'➥<a href="{li["href"]}">{str(i.get_text()).lstrip()}</a> |\n'
34
+ return l
35
+ if u[-1]=='tmb':
36
+ resp=requests.get(f'https://themoviesboss.site/?s={u[0]}')
37
+ soup = BeautifulSoup(resp.content, "html.parser")
38
+ l=f'Search Result for {u[0]}\n'
39
+ for i in soup.find_all('a',{'class':'p-url'}):
40
+ l+=f'➥<a href="{i.get("href")}">{str(i.text).lstrip()}</a>\n'
41
+ return l
42
+ if u[-1]=='cine':
43
+ resp=requests.get(f'https://cinevood.co.uk/?s={u[0]}')
44
+ soup = BeautifulSoup(resp.content, "html.parser")
45
+ l=f'Search Result for {u[0]}\n'
46
+ for i in soup.find_all('article',{'class':"latestPost excerpt"}):
47
+ l+=f'➥<a href="{i.a["href"]}">{str(i.a["title"]).lstrip()}</a>\n'
48
+ for i in soup.find_all('article',{'class':"latestPost excerpt first"}):
49
+ l+=f'➥<a href="{i.a["href"]}">{str(i.a["title"]).lstrip()}</a>\n'
50
+ for i in soup.find_all('article',{'class':"latestPost excerpt last"}):
51
+ l+=f'➥<a href="{i.a["href"]}">{str(i.a["title"]).lstrip()}</a>\n'
52
+ return l
53
+ if u[-1]=='atishmkv':
54
+ client = cloudscraper.create_scraper(allow_brotli=False)
55
+ res=client.get(f'https://atishmkv.wiki/?s={u[0]}')
56
+ soup=BeautifulSoup(res.content,'html.parser')
57
+ l=f'Search Result for {u[0]}\n'
58
+ for i in soup.find_all('h2',{'class':"entry-title"}):
59
+ l+=f'➥<a href="{i.a.get("href")}">{str(i.get_text()).lstrip()}</a> |\n'
60
+ return l
61
+
62
+
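Usage sketch for scrape() — the argument packs the search term and a site suffix into one dash-separated string, so the term itself must not contain a dash (site availability is, of course, not guaranteed):

from scraper import scrape

print(scrape('broker-10bit'))   # search 10bitclub.me for "broker"
print(scrape('broker'))         # no suffix -> returns the usage hint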
start.sh ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ if [ -z "$UPSTREAM_REPO" ]
2
+ then
3
+ echo "Cloning main Repository"
4
+ git clone https://github.com/bipinkrish/Link-Bypasser-Bot /Link-Bypasser-Bot
5
+ else
6
+ echo "Cloning Custom Repo from $UPSTREAM_REPO "
7
+ git clone "$UPSTREAM_REPO" /Link-Bypasser-Bot
8
+ fi
9
+ cd /Link-Bypasser-Bot
10
+ pip3 install -U -r requirements.txt
11
+ echo "Starting Bypass Bot...."
12
+ python3 main.py
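Note that start.sh re-clones and reinstalls on every boot: with UPSTREAM_REPO set it pulls a custom fork, otherwise it falls back to the public Link-Bypasser-Bot repository before starting main.py.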
testing/bot.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
+ import pyrogram
4
+ from pyrogram import Client
5
+ from pyrogram import filters, enums
6
+ from pyrogram.types import InlineKeyboardMarkup,InlineKeyboardButton
7
+ from pyrogram.errors import UserNotParticipant
8
+ import os
9
+ import requests
10
+ from scraper import scrape
11
+ import re
12
+ from datetime import date, datetime
13
+ import time
14
+ from pyrogram.types import Message
15
+ import asyncio
16
+ #from test import animepahe
17
+ #from testing.utils.eval import aexec
18
+ import io
19
+ import sys
20
+ import traceback
21
+ import subprocess
22
+
23
+
24
+
25
+ # bot
26
+ bot_token = os.environ.get("TOKEN", "")  # supply credentials via env vars; live values should never be committed
27
+ api_hash = os.environ.get("HASH", "")
28
+ api_id = os.environ.get("ID", "")
29
+
30
+ app = Client("my_bot",api_id=api_id, api_hash=api_hash,bot_token=bot_token)
31
+
32
+ @app.on_message(filters.command('q', prefixes='!'))
33
+ def send_start(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
34
+ app.send_message(message.chat.id,'hello')
35
+ async def aexec(code, client, message):
36
+ exec(
37
+ "async def __aexec(client, message): "
38
+ + "".join(f"\n {l_}" for l_ in code.split("\n"))
39
+ )
40
+ return await locals()["__aexec"](client, message)
41
+
42
+
43
+ @app.on_message(filters.command('qwe', prefixes='!'))
44
+ async def eval1(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message):
45
+ status_message = await message.reply_text("Processing ...")
46
+ cmd = message.text.split(" ", maxsplit=1)[1]
47
+ reply_to_ = message
48
+ if message.reply_to_message:
49
+ reply_to_ = message.reply_to_message
50
+ old_stderr = sys.stderr
51
+ old_stdout = sys.stdout
52
+ redirected_output = sys.stdout = io.StringIO()
53
+ redirected_error = sys.stderr = io.StringIO()
54
+ stdout, stderr, exc = None, None, None
55
+ try:
56
+ await aexec(cmd, client, message)
57
+ except Exception:
58
+ exc = traceback.format_exc()
59
+ stdout = redirected_output.getvalue()
60
+ stderr = redirected_error.getvalue()
61
+ sys.stdout = old_stdout
62
+ sys.stderr = old_stderr
63
+ evaluation = ""
64
+ if exc:
65
+ evaluation = exc
66
+ elif stderr:
67
+ evaluation = stderr
68
+ elif stdout:
69
+ evaluation = stdout
70
+ else:
71
+ evaluation = "Success"
72
+ final_output = "<b>EVAL</b>: "
73
+ final_output += f"<code>{cmd}</code>\n\n"
74
+ final_output += "<b>OUTPUT</b>:\n"
75
+ final_output += f"<code>{evaluation.strip()}</code> \n"
76
+ if len(final_output) > 4096:
77
+ with io.BytesIO(str.encode(final_output)) as out_file:
78
+ out_file.name = "eval.txt"
79
+ await reply_to_.reply_document(
80
+ document=out_file, caption=cmd, disable_notification=True
81
+ )
82
+ else:
83
+ await reply_to_.reply_text(final_output)
84
+ await status_message.delete()
85
+
86
+
87
+
88
+ # server loop
89
+ print("Bot Starting")
90
+ print('1')
91
+ app.run()
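Usage sketch for the !qwe eval handler above — the text after the command is compiled into an async function body and executed in-process, so it must only ever be exposed to the owner. Example messages, assuming the bot can read the chat:

!qwe print(message.chat.id)
!qwe await message.reply_text('pong')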
testing/scraper.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from re import match as rematch, findall, sub as resub, compile as recompile
3
+ import requests
4
+ from requests import get as rget
5
+ # import base64
6
+ # from urllib.parse import unquote, urlparse, parse_qs, quote
7
+ import time
8
+ # import cloudscraper
9
+ from bs4 import BeautifulSoup, NavigableString, Tag
10
+ # from lxml import etree
11
+ # import hashlib
12
+ # import json
13
+ # from dotenv import load_dotenv
14
+ # load_dotenv()
15
+ # from asyncio import sleep as asleep
16
+ # import os
17
+ # import ddl
18
+ # from cfscrape import create_scraper
19
+ # from uuid import uuid4
20
+ # from requests import session
21
+ # from ddl import humanbytes
22
+
23
+
24
+ def scrape(url):
25
+ if '-' not in url:return 'Use with -10bit -tmb -cine'
26
+ print(url)
27
+ u=url.split('-')
28
+ print(u)
29
+ if u[-1]=='10bit':
30
+ resp=requests.get(f'https://10bitclub.me/?s={u[0]}')
31
+ soup = BeautifulSoup(resp.content, "html.parser")
32
+ l='Titles:\n'
33
+ for i in soup.find_all('div',{'class':'title'}):
34
+ li=i.a
35
+ l+=f'➥<a href="{li["href"]}">{str(i.get_text()).lstrip()}</a> |\n'
36
+ return l
37
+ if u[-1]=='tmb':
38
+ resp=requests.get(f'https://themoviesboss.site/?s={u[0]}')
39
+ soup = BeautifulSoup(resp.content, "html.parser")
40
+ l='Titles:\n'
41
+ for i in soup.find_all('a',{'class':'p-url'}):
42
+ l+=f'➥<a href="{i.get("href")}">{str(i.text).lstrip()}</a>\n'
43
+ return l
44
+ if u[-1]=='cine':
45
+ resp=requests.get(f'https://cinevood.motorcycles/?s={u[0]}')
46
+ soup = BeautifulSoup(resp.content, "html.parser")
47
+ l='Titles:\n'
48
+ for i in soup.find_all('article',{'class':"latestPost excerpt"}):
49
+ l+=f'➥<a href="{i.a["href"]}">{str(i.a["title"]).lstrip()}</a>\n'
50
+ for i in soup.find_all('article',{'class':"latestPost excerpt first"}):
51
+ l+=f'➥<a href="{i.a["href"]}">{str(i.a["title"]).lstrip()}</a>\n'
52
+ for i in soup.find_all('article',{'class':"latestPost excerpt last"}):
53
+ l+=f'➥<a href="{i.a["href"]}">{str(i.a["title"]).lstrip()}</a>\n'
54
+ return l
55
+
56
+ def atozcartoon(word):
57
+ resp=requests.get(f'https://www.atozcartoonist.com/?s={word}')
58
+ soup = BeautifulSoup(resp.content, "html.parser")
59
+ l='Titles:\n'
60
+ for i in soup.find_all('h2',{'class':"entry-title h3"}):
61
+ l+=f'➥<a href="{i.a["href"]}">{str(i.a.text).lstrip()}</a>\n'
62
+ # for i in soup.find_all('article',{'class':"latestPost excerpt first"}):
63
+ # l+=f'➥<a href="{i.a["href"]}">{str(i.a["title"]).lstrip()}</a>\n'
64
+ # for i in soup.find_all('article',{'class':"latestPost excerpt last"}):
65
+ # l+=f'➥<a href="{i.a["href"]}">{str(i.a["title"]).lstrip()}</a>\n'
66
+ return l
67
+
68
+ print(atozcartoon('ben 10'))
69
+ #re=requests.get('https://www.atozcartoonist.com/2023/08/chhota-bheem-maha-shaitaan-ka-mahayudh-movie-multi-audio-download-480p-sdtv-web-dl.html')
70
+ #re=requests.get('https://www.atozcartoonist.com/2022/04/ben-10-reboot-season-2-hindienglish-episodes-download-1080p-fhd.html')
71
+ res=requests.get('https://www.atozcartoonist.com/2022/03/ben-10-classic-season-2-episodes-in-hindi-english-download-576p-hevc.html')  # 'res', not 're', to avoid shadowing the re module
72
+ soup=BeautifulSoup(res.content,'html.parser')
73
+ print('j')
74
+ try:
75
+ for i in soup.select('div[class*="mks_accordion_item"]'):
76
+ print(i.text.strip())
77
+ print(i.a['href'])
78
+ l=i.text.strip()
79
+ except: print('j')
80
+ for i in soup.find_all('strong'):
81
+ try:
82
+ print(i.a['href'])
83
+ print(i.text.strip())
84
+ except:pass
85
+ # if l!=None:
86
+ # print('d')
87
+ # else:
88
+ # print('Something went Wrong')
89
+ for i in soup.find_all('strong'):
90
+ try:
91
+ print(i.a['href'])
92
+ print(i.text.strip())
93
+ except:pass
testing/test.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from re import match as rematch, findall, sub as resub, compile as recompile
3
+ import requests
4
+ from requests import get as rget,session
5
+ import time
6
+ import cloudscraper
7
+ from bs4 import BeautifulSoup, NavigableString, Tag
8
+ from requests import session
9
+ #from anime import set_direct_link
10
+ from base64 import standard_b64encode
11
+ from json import loads
12
+ from math import floor, pow
13
+ from os import environ
14
+ from re import findall, match, search, sub
15
+ from time import sleep
16
+ from urllib.parse import quote, unquote, urlparse, parse_qs
17
+ from uuid import uuid4
18
+
19
+ from bs4 import BeautifulSoup
20
+ from cfscrape import create_scraper
21
+ # fi=open('1231.html','w')
22
+ # fi.write(res.text)
23
+ # fi.close()
24
+ from lxml import etree
25
+ from requests import get,session
26
+ import requests
27
+ from asyncio import sleep as asleep, create_task,gather
28
+ import asyncio
29
+ import requests
30
+ from bs4 import BeautifulSoup
31
+
32
+ # def tenbit():
33
+ # soup=BeautifulSoup(requests.get('https://10bitclub.me/movies/broker/').content,'html.parser')
34
+ # l=''
35
+ # p=0
36
+ # # for i,j in zip(soup.find_all('span',{'style':"color: #ffffff;"}),soup.find_all('a',{'class':'mb-button'})):
37
+ # # l+=f'{i.text}\n'
38
+ # # print(i)
39
+ # # l+=f'<a href="{j.get("href")}">➥{str(j.text).lstrip()}</a> |\n'
40
+ # # print(j)
41
+ # # break
42
+ # for k in soup.find_all('p'):
43
+ # try:
44
+ # # print(k.span.text)
45
+ # l+=f'<a href="{k.span.a("href")}">➥{str(k.span.text).lstrip()}</a> |\n'
46
+ # except:pass
47
+ # return l
48
+
49
+ # print(tenbit())
50
+ #https://www.google.com/url?sa=t&source=web&rct=j&opi=89978449&url=https://www.infokeeda.xyz/2023/07/myforexfunds-review-2023.html%3Fm%3D1&ved=2ahUKEwiju4TG06CBAxUrSWwGHUamDEIQFnoECBEQAQ&usg=AOvVaw3Op1sNfIWr1dn5oy2ez3U7
51
+
52
+ #https://ontechhindi.com/token.php?post=stEwF3z
53
+
54
+ # url='https://techable.site/?id=OHI3eVduQzZ6bHBuYUo5dm9mRTJQT2hZODVGbHJDZHV4Q0NyWEswMlJWc2piMWRvNDhFbENxVEEyTjNCN3F1REpndzBodERlVEgvcXNTMGNubHJtNWk2eGdXem5MekVRNForN3M0T0VuTjVnanlZWkxxREwzaFMwek1aMVVqcXpBL012ckhFNWU5VEF0Q2pzbWEzN0pxQjFRRjVqZGF3OWxRVzFpUnFqL2RRPQ=='
55
+ # client = cloudscraper.create_scraper(allow_brotli=False)
56
+ # res=client.get(url,allow_redirects=False)
57
+ # r=open('12.html','w')
58
+ # r.write(res.text)
59
+
60
+ # def transcript(url: str, DOMAIN: str, ref: str, sltime) -> str:
61
+ # code = url.rstrip("/").split("/")[-1]
62
+ # cget = cloudscraper.create_scraper(allow_brotli=False).request
63
+ # resp = cget("GET", f"{DOMAIN}/{code}", headers={"referer": ref},allow_redirects=False)
64
+ # soup = BeautifulSoup(resp.content, "html.parser")
65
+ # data = { inp.get('name'): inp.get('value') for inp in soup.find_all("input") }
66
+ # print(data)
67
+ # sleep(sltime)
68
+ # resp = cget("POST", f"{DOMAIN}/links/go", data=data, headers={ "x-requested-with": "XMLHttpRequest" })
69
+ # try:
70
+ # return resp.json()['url']
71
+ # except:
72
+ # return "Something went wrong :("
73
+ # #print(transcript(url,'https://link1s.net','https://nguyenvanbao.com/',0))
74
+
75
+
76
+ # url='https://themoviesboss.online/secret?data=Ym1MRm1xV1JUc2VsM0xyZHVGNFVITitsK1ZzQStCVGFwS2Uvc3dYRkcrV0lwY1NLRVA0YmhNZzFsQ3BQUUI5cXg4azAwSWNXVU15alk1b3RIMXFRK2c9PTo6H8gNM_s_733h1dtQ3d_p_ukr6Q_e__e_'
77
+ # cget = cloudscraper.create_scraper(allow_brotli=False).request
78
+ # resp = cget("GET", url).url
79
+ # print(resp)
80
+
81
+ import requests
82
+
83
+ # cookies = {
84
+ # 'AppSession': '76e00694ea4fbe8fa997895d51ec6308',
85
+ # }
86
+
87
+ # headers = {
88
+ # 'authority': 'go.publicearn.com',
89
+ # 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
90
+ # 'accept-language': 'en-GB,en;q=0.5',
91
+ # # 'cookie': 'AppSession=76e00694ea4fbe8fa997895d51ec6308',
92
+ # 'referer': 'https://starxinvestor.com/',
93
+ # 'sec-ch-ua': '"Brave";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
94
+ # 'sec-ch-ua-mobile': '?0',
95
+ # 'sec-ch-ua-platform': '"Linux"',
96
+ # 'sec-fetch-dest': 'document',
97
+ # 'sec-fetch-mode': 'navigate',
98
+ # 'sec-fetch-site': 'cross-site',
99
+ # 'sec-fetch-user': '?1',
100
+ # 'sec-gpc': '1',
101
+ # 'upgrade-insecure-requests': '1',
102
+ # 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
103
+ # }
104
+
105
+ # params = {
106
+ # 'uid': '350533',
107
+ # }
108
+ # #, cookies=cookies
109
+ # #https://go.publicearn.com/Oe6oRrr/?uid=350533
110
+ # response = requests.get('https://go.publicearn.com/Oe6oRrr/', params=params, headers=headers,allow_redirects=False)
111
+ # print(response.text)
112
+ # # f=open('rr1.html','w')
113
+ # # f.write(response.text)
114
+
115
+ # import requests
116
+
117
+ # cookies = {
118
+ # 'tp': 's0i2WwX',
119
+ # }
120
+
121
+ # headers = {
122
+ # 'authority': 'starxinvestor.com',
123
+ # 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
124
+ # 'accept-language': 'en-GB,en;q=0.7',
125
+ # 'cache-control': 'max-age=0',
126
+ # # 'cookie': 'tp=s0i2WwX',
127
+ # 'referer': 'https://www.google.com/',
128
+ # 'sec-ch-ua': '"Brave";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
129
+ # 'sec-ch-ua-mobile': '?0',
130
+ # 'sec-ch-ua-platform': '"Linux"',
131
+ # 'sec-fetch-dest': 'document',
132
+ # 'sec-fetch-mode': 'navigate',
133
+ # 'sec-fetch-site': 'cross-site',
134
+ # 'sec-fetch-user': '?1',
135
+ # 'sec-gpc': '1',
136
+ # 'upgrade-insecure-requests': '1',
137
+ # 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
138
+ # }
139
+
140
+ # response = requests.get(
141
+ # 'https://starxinvestor.com/index.php/2023/05/11/best-education-college-in-world-list-top-10-college-in-world-with-their-ranking/',
142
+ # cookies=cookies,
143
+ # headers=headers,
144
+ # )
145
+ # f=open('rr1.html','w')
146
+ # f.write(response.text)
147
+
148
+ def is_share_link(url):
149
+ return bool(re.search(r'gofile\.io|toonshub\.xyz|toonshub\.link|www\.toonshub\.link|www\.toonshub\.xyz|toonworld4all\.me|www\.instagram|youtu|www\.youtu|www\.youtube|indexlink|d\.terabox|mega\.nz|t\.me|telegram|workers\.dev', url))
150
+
151
+ if is_share_link('https://www.toonshub.link/episode/zom-100-bucket-list-of-the-dead/1x1'):
152
+ print('l')
153
+
154
+
texts.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gdrivetext = """__- appdrive \n\
2
+ - driveapp \n\
3
+ - drivehub \n\
4
+ - gdflix \n\
5
+ - drivesharer \n\
6
+ - drivebit \n\
7
+ - drivelinks \n\
8
+ - driveace \n\
9
+ - drivepro \n\
10
+ - driveseed \n\
11
+ __"""
12
+
13
+
14
+ otherstext = """__- exe, exey \n\
15
+ - sub2unlock \n\
16
+ - rekonise \n\
17
+ - letsboost \n\
18
+ - phapps2app \n\
19
+ - mboost \n\
20
+ - sub4unlock \n\
21
+ - ytsubme \n\
22
+ - bitly \n\
23
+ - social-unlock \n\
24
+ - boost \n\
25
+ - gooly \n\
26
+ - shrto \n\
27
+ - tinyurl
28
+ __"""
29
+
30
+
31
+ ddltext = """__- yandex \n\
32
+ - mediafire \n\
33
+ - uptobox \n\
34
+ - osdn \n\
35
+ - github \n\
36
+ - hxfile \n\
37
+ - 1drv (onedrive) \n\
38
+ - pixeldrain \n\
39
+ - antfiles \n\
40
+ - streamtape \n\
41
+ - racaty \n\
42
+ - 1fichier \n\
43
+ - solidfiles \n\
44
+ - krakenfiles \n\
45
+ - upload \n\
46
+ - akmfiles \n\
47
+ - linkbox \n\
48
+ - shrdsk \n\
49
+ - letsupload \n\
50
+ - zippyshare \n\
51
+ - wetransfer \n\
52
+ - terabox, teraboxapp, 4funbox, mirrobox, nephobox, momerybox \n\
53
+ - filepress \n\
54
+ - anonfiles, hotfile, bayfiles, megaupload, letsupload, filechan, myfile, vshare, rapidshare, lolabits, openload, share-online, upvid \n\
55
+ - fembed, femax20, fcdn, feurl, layarkacaxxi, naniplay, nanime, mm9842 \n\
56
+ - sbembed, watchsb, streamsb, sbplay.
57
+ __"""
58
+
59
+
60
+ shortnertext = """__- igg-games \n\
61
+ - olamovies\n\
62
+ - katdrive \n\
63
+ - drivefire\n\
64
+ - kolop \n\
65
+ - hubdrive \n\
66
+ - filecrypt \n\
67
+ - shareus \n\
68
+ - shortingly \n\
69
+ - gyanilinks \n\
70
+ - shorte \n\
71
+ - psa \n\
72
+ - sharer \n\
73
+ - new1.gdtot \n\
74
+ - adfly\n\
75
+ - gplinks\n\
76
+ - droplink \n\
77
+ - linkvertise \n\
78
+ - rocklinks \n\
79
+ - ouo \n\
80
+ - try2link \n\
81
+ - htpmovies \n\
82
+ - sharespark \n\
83
+ - cinevood\n\
84
+ - atishmkv \n\
85
+ - urlsopen \n\
86
+ - xpshort, techymozo \n\
87
+ - dulink \n\
88
+ - ez4short \n\
89
+ - krownlinks \n\
90
+ - teluguflix \n\
91
+ - taemovies \n\
92
+ - toonworld4all \n\
93
+ - animeremux \n\
94
+ - adrinolinks \n\
95
+ - tnlink \n\
96
+ - flashlink \n\
97
+ - short2url \n\
98
+ - tinyfy \n\
99
+ - mdiskshortners \n\
100
+ - earnl \n\
101
+ - moneykamalo \n\
102
+ - easysky \n\
103
+ - indiurl \n\
104
+ - linkbnao \n\
105
+ - mdiskpro \n\
106
+ - tnshort \n\
107
+ - indianshortner \n\
108
+ - rslinks \n\
109
+ - bitly, tinyurl \n\
110
+ - thinfi \n\
111
+ __"""
112
+
113
+
114
+ HELP_TEXT = f'**--Just Send Me Any Supported Link From the Below-Mentioned Sites--** \n\n\
115
+ **List of Sites for DDL:** \n\n{ddltext} \n\
116
+ **List of Sites for Shorteners:** \n\n{shortnertext} \n\
117
+ **List of Sites for GDrive Look-Alikes:** \n\n{gdrivetext} \n\
118
+ **Other Supported Sites:** \n\n{otherstext}'
119
+
120
+ #class script(object):
121
+
122
+ RESTART_TXT = """
123
+ <b>Bᴏᴛ Rᴇsᴛᴀʀᴛᴇᴅ !
124
+ 📅 Dᴀᴛᴇ : <code>{}</code>
125
+ ⏰ Tɪᴍᴇ : <code>{}</code>
126
+ 🌐 Tɪᴍᴇᴢᴏɴᴇ : <code>Asia/Kolkata</code></b>"""
127
+
128
+ START_TEXT = """
129
+ <i>👋 Hᴇʏ, Tʜᴇʀᴇ </i>{}\n
130
+ <i>I'ᴍ ᴀ ʟɪɴᴋ ʙʏᴘᴀss ᴀɴᴅ sᴄʀᴀᴘᴇʀ ʙᴏᴛ </i>\n
131
+ <i>Fᴏʀ Mᴏʀᴇ ɪɴғᴏ Cʟɪᴄᴋ ᴏɴ ʜᴇʟᴘ</i>\n
132
+ <i>Sᴇɴᴅ /sɪᴛᴇs ᴛᴏ sᴇᴇ Sᴜᴘᴘᴏʀᴛᴇᴅ Sɪᴛᴇs</i>\n
133
+ <i>🍃 Bᴏᴛ Mᴀɪɴᴛᴀɪɴᴇᴅ Bʏ: <a href="https://t.me/Jack_Frost_003">ᴊᴀᴄᴋ ғʀᴏsᴛ</a></i>\n\n
134
+ <i> Bᴏᴛ Pʀᴏᴠɪᴅᴇᴅ ʙʏ: <a href="https://t.me/MrPrinceBotz">MʀPʀɪɴᴄᴇ Bᴏᴛᴢ</a></i>"""
135
+
136
+ SHORT_TEXT="""
137
+ <i>✯ Hᴇʟʟᴏ Bʀᴏ, I ᴄᴀɴ ʙʏᴘᴀss ᴀɴɴᴏʏɪɴɢ ᴀᴅʟɪɴᴋs; sᴇɴᴅ ᴍᴇ ᴛʜᴇ ʟɪɴᴋ ᴀɴᴅ ɪ ᴡɪʟʟ ɢɪᴠᴇ ʏᴏᴜ ᴛʜᴇ ʙʏᴘᴀssᴇᴅ ʟɪɴᴋ</i>\n
138
+ <i>✯ Sᴜᴘᴘᴏʀᴛᴇᴅ Sᴄʀᴀᴘᴇʀ Sɪᴛᴇs: \nCɪɴᴇᴠᴏᴏᴅ\n10ʙɪᴛ\nᴛʜᴇᴍᴏᴠɪᴇʙᴏss\nᴀɴɪᴍᴇᴘᴀʜᴇ\nᴛᴏᴏɴᴡᴏʀʟᴅ4ᴀʟʟ\nsʜᴀʀᴇsᴘᴀʀᴋ\nsᴋʏᴍᴏᴠɪᴇsʜᴅ\nᴀᴛᴏᴢᴄᴀʀᴛᴏᴏɴɪsᴛ</i>\n
139
+ <i>✯ Usᴇ /help ᴛᴏ sᴇᴇ sᴜᴘᴘᴏʀᴛᴇᴅ sɪᴛᴇs</i>\n
140
+ <i>✯ Fᴏʀ ᴀɴʏ ᴇʀʀᴏʀ ᴏʀ sᴜɢɢᴇsᴛɪᴏɴ, ғᴇᴇʟ ғʀᴇᴇ ᴛᴏ ᴍᴇssᴀɢᴇ ᴛʜᴇ <a href="https://t.me/MrPrinceSupport">sᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ</a></i>
141
+
142
+ """
143
+ ABOUT_TEXT="""
144
+ ╭────[ ᴍʀᴘʀɪɴᴄᴇ ʙʏᴘᴀss ʙᴏᴛ ]─── ❅
145
+ ├ ❅ ɴᴀᴍᴇ : <a href="https://t.me/MrPrince_Link_Bypass_bot">ᴍʀᴘʀɪɴᴄᴇ ʙʏᴘᴀss ʙᴏᴛ</a>
146
+ ├ ❅ ᴏᴡɴᴇʀ : <a href="https://t.me/Jack_Frost_003">ᴊᴀᴄᴋ ғʀᴏsᴛ </a>
147
+ ├ ❅ ᴠᴇʀsɪᴏɴ : <a href="https://t.me/MrPrince_Link_Bypass_bot">3.1.2 </a>
148
+ ├ ❅ ʟᴀɴɢᴜᴀɢᴇ : <a href="https://www.python.org/download/releases/3.0/">ᴘʏᴛʜᴏɴ 3 </a>
149
+ ├ ❅ ꜰʀᴀᴍᴇᴡᴏʀᴋ : <a href="https://docs.pyrogram.org/">ᴘʏʀᴏɢʀᴀᴍ </a>
150
+ ├ ❅ ᴅᴀᴛᴀʙᴀsᴇ : <a href="https://www.mongodb.com/">ᴍᴏɴɢᴏ ᴅʙ </a>
151
+ ├ ❅ ᴅᴇᴠᴇʟᴏᴘᴇʀ : <a href="https://t.me/Jack_Frost_003">ᴊᴀᴄᴋ ғʀᴏsᴛ </a>
152
+ ├ ❅ ᴍʏ ʙᴇsᴛ ꜰʀɪᴇɴᴅ : <a href="tg://settings">ᴛʜɪs ᴘᴇʀsᴏɴ </a>
153
+ ╰────────────────────────────── ❅
154
+ """
u.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+
3
+ headers = {
4
+ 'authority': 'publicearn.com',
5
+ 'accept': '*/*',
6
+ 'accept-language': 'en-US,en;q=0.5',
7
+ 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
8
+ 'origin': 'https://starxinvestor.com',
9
+ 'referer': 'https://starxinvestor.com/',
10
+ 'sec-ch-ua': '"Brave";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
11
+ 'sec-ch-ua-mobile': '?0',
12
+ 'sec-ch-ua-platform': '"macOS"',
13
+ 'sec-fetch-dest': 'empty',
14
+ 'sec-fetch-mode': 'cors',
15
+ 'sec-fetch-site': 'cross-site',
16
+ 'sec-gpc': '1',
17
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
18
+ }
19
+
20
+ data = {
21
+ 'step_1': 'avmQ6e',
22
+ 'data': '3SeTEJE0McEmh0o3jb2CJ6swxzCdPMnCKrhUogC/BwG65knNzfuaTjoXucRdN33Zv7aa2NGUrsE81n8HmUG34oVjl9wooYDXbXLfaIkJyXQHLm9yoOuhvAA5V4dN9xMq2MY3Z9AiYXPOIRzA7CF30Q==',
23
+ }
24
+
25
+ response = requests.post('https://publicearn.com/link/verify.php', headers=headers, data=data)
26
+ print(response.json())
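Note that step_1 and data above are one-shot values captured from a live publicearn session; replaying them verbatim will most likely just return an error JSON, so this file is best read as a template for the verify.php call rather than a working bypass.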
update.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from os import path as opath, getenv
2
+ from logging import FileHandler, StreamHandler, INFO, basicConfig, error as log_error, info as log_info
3
+ from logging.handlers import RotatingFileHandler
4
+ from subprocess import run as srun
5
+ from dotenv import load_dotenv
6
+
7
+ basicConfig(
8
+ level=INFO,
9
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s [%(filename)s:%(lineno)d]",
10
+ datefmt="%d-%b-%y %I:%M:%S %p",
11
+ handlers=[
12
+ RotatingFileHandler("log.txt", maxBytes=50000000, backupCount=10),
13
+ StreamHandler(),
14
+ ],
15
+ )
16
+ load_dotenv('config.env', override=True)
17
+
18
+ UPSTREAM_REPO = getenv('UPSTREAM_REPO', "https://github.com/JackFrost003/Bypass_bot")  # pass a tokenised URL through the env var instead of committing a PAT as the default
19
+ UPSTREAM_BRANCH = getenv('UPSTREAM_BRANCH', "main")
20
+
21
+ if UPSTREAM_REPO is not None:
22
+ if opath.exists('.git'):
23
+ srun(["rm", "-rf", ".git"])
24
+
25
+ update = srun([f"git init -q \
26
+ && git config --global user.email jackfro03@gmail.com \
27
+ && git config --global user.name JackFrost003 \
28
+ && git add . \
29
+ && git commit -sm update -q \
30
+ && git remote add origin {UPSTREAM_REPO} \
31
+ && git fetch origin -q \
32
+ && git reset --hard origin/{UPSTREAM_BRANCH} -q"], shell=True)
33
+
34
+ if update.returncode == 0:
35
+ log_info('Successfully updated with latest commit from UPSTREAM_REPO')
36
+ else:
37
+ log_error('Something went wrong while updating; check that UPSTREAM_REPO is valid!')
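update.py is presumably run once at process start (before main.py) so that a simple restart pulls the latest commit from UPSTREAM_REPO; note that the hard reset discards any local changes.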
utils/eval.py ADDED
@@ -0,0 +1 @@
 
 
1
+ print('hello')
ww.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from selenium import webdriver
2
+ from selenium.webdriver.common.by import By
3
+ from selenium.webdriver.support import expected_conditions as ec
4
+ from selenium.webdriver.support.ui import WebDriverWait
5
+ import chromedriver_autoinstaller
6
+ from bs4 import BeautifulSoup
7
+ import cloudscraper
8
+ import os
9
+ import time
10
+ uid=os.environ.get('PEUID')
11
+ def publicearn(url,uid):
12
+ chromedriver_autoinstaller.install()
13
+ chrome_options = webdriver.ChromeOptions()
14
+ chrome_options.add_argument("--no-sandbox")
15
+ chrome_options.add_argument("--headless")
16
+ chrome_options.add_argument("--disable-dev-shm-usage")
17
+ driver = webdriver.Chrome(options=chrome_options)
18
+ driver.get(url)
19
+ for i in range(0,29):  # wait out the shortener's countdown page (assumed to be roughly 30 s)
20
+ time.sleep(1)
21
+ print(i)
22
+ code=url.split('/')[-1]
23
+ ref=(driver.current_url).split('//')[-1].split('/')[0]
24
+ print(ref)
+ driver.quit()  # release the headless browser once the referrer has been captured
25
+ cget = cloudscraper.create_scraper(allow_brotli=False).request
26
+ resp = cget("GET", f"https://go.publicearn.com/{code}/?uid={uid}", headers={"referer": f'https://{ref}/'})
27
+ soup = BeautifulSoup(resp.content, "html.parser")
28
+ data = { inp.get('name'): inp.get('value') for inp in soup.find_all("input") }
29
+ print(data)
30
+ resp = cget("POST", f"https://go.publicearn.com/links/go", data=data, headers={ "x-requested-with": "XMLHttpRequest" })
31
+ try:
32
+ return resp.json()['url']
33
+ except Exception as e:
34
+ print(e)
35
+
36
+ url='https://publicearn.com/ZFd5'
37
+ print(publicearn(url,uid))
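The Selenium visit here only exists to sit through the countdown page and discover the intermediate referrer domain; the token exchange itself is then replayed with cloudscraper. If the site changes its countdown length, the fixed 29-second loop above is the first thing to adjust.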