Kyo-Kai committed on
Commit
71de518
1 Parent(s): 84427de

Update sites/danbooru.py

Browse files
Files changed (1) hide show
  1. sites/danbooru.py +25 -16
sites/danbooru.py CHANGED
@@ -1,6 +1,8 @@
1
  import time
2
  import urllib.request
3
  import os
 
 
4
  from random import randint
5
  from selenium.webdriver.common.by import By
6
  from selenium.webdriver.common.keys import Keys
@@ -11,7 +13,7 @@ from commands.exec_path import imgList
11
  from commands.universal import searchQuery, save_Search, continue_Search, contains_works
12
  from ai.classifying_ai import img_classifier
13
 
14
- def getOrderedDanbooruImages(driver, exec_path, user_search, num_pics, num_pages, filters, bl_tags, inc_tags, imageControl):
15
  global image_locations, bl_tags_list, inc_tags_list, image_names, ai_mode,rating_filters
16
  image_names = imgList(mode=0)
17
  image_locations = []
@@ -23,15 +25,16 @@ def getOrderedDanbooruImages(driver, exec_path, user_search, num_pics, num_pages
23
  driver.get(link)
24
 
25
  # Rating Filter Creation
26
- rating_filters = ["s","e"]
 
27
  rating_filters = ["q","s","e"] if 4 in filters else []
28
 
29
  # Tag list creation
30
  score = 1 if 0 in filters else 0
31
  match_type = 1 if 1 in filters else 0
32
- r_18 = pg_lenient()
33
  r_18 = pg_strict() if 3 in filters else r_18
34
- ai_mode = 1
35
 
36
  continue_search = 1 if imageControl else 0
37
 
@@ -55,7 +58,7 @@ def getOrderedDanbooruImages(driver, exec_path, user_search, num_pics, num_pages
55
 
56
  curr_page = driver.current_url
57
  while len(image_locations) < num_pics*num_pages:
58
- pages_to_search(driver, num_pages, num_pics, exec_path)
59
  if curr_page == driver.current_url and len(image_locations) < num_pics*num_pages:
60
  print("Reached end of search results")
61
  break
@@ -69,20 +72,19 @@ def filter_score(score):
69
  return " order:score"
70
  return ""
71
 
72
def pages_to_search(driver, num_pages, num_pics, exec_path):
    """Scrape up to num_pages of search results, one thumbnail grid at a time.

    Stops early when paging runs out (valid_page) or once the shared
    image_locations list reaches the requested total.
    """
    grid_xpath = '//*[@class="posts-container gap-2"]'
    for _ in range(num_pages):
        # Block until the post grid has rendered on the current page.
        WebDriverWait(driver, timeout=11).until(
            EC.presence_of_element_located((By.XPATH, grid_xpath))
        )
        previews = (
            driver.find_element(By.XPATH, grid_xpath)
            .find_elements(By.CLASS_NAME, "post-preview-link")
        )
        grid_search(driver, num_pics, previews, exec_path, num_pages)
        save_Search(driver, mode=1)
        # Quit when there is no next page or the quota is already filled.
        if not valid_page(driver) or len(image_locations) >= num_pics * num_pages:
            break
83
 
84
- def grid_search(driver, num_pics, images, exec_path, num_pages):
85
- time.sleep(2)
86
  temp_img_len = len(image_locations)
87
  for n_iter, image in enumerate(images):
88
  if len(image_locations) >= num_pics*num_pages or len(image_locations) - temp_img_len >= num_pics:
@@ -102,7 +104,7 @@ def grid_search(driver, num_pics, images, exec_path, num_pages):
102
 
103
  if ai_mode:
104
  checker = 0
105
- image_loc = download_image(exec_path=exec_path, driver=driver, image=image)
106
  if img_classifier(image_loc):
107
  print("AI Mode: I approve this image")
108
  else:
@@ -114,7 +116,7 @@ def grid_search(driver, num_pics, images, exec_path, num_pages):
114
 
115
  driver, tempImg = tab_handler(driver=driver,image=image)
116
  WebDriverWait(driver, timeout=15).until(EC.presence_of_element_located((By.XPATH, '//*[@id="post-option-download"]/a')))
117
- download_image(exec_path=exec_path, driver=driver)
118
  driver = tab_handler(driver=driver)
119
 
120
  else:
@@ -192,7 +194,7 @@ def pg_strict():
192
  "covered_nipples", "thigh_focus", "thighs", "bikini", "swimsuit", "grabbing_another's_breast", "huge_breasts",
193
  "foot_focus", "licking_foot", "foot_worship", "shirt_lift","clothes_lift", "underwear", "panties_under_pantyhose"]
194
 
195
- def download_image(exec_path, driver, image=0):
196
  if not image:
197
  tempDL = driver.find_element(By.XPATH, '//*[@id="post-option-download"]/a')
198
  tempDLAttr = tempDL.get_attribute("href")
@@ -203,10 +205,17 @@ def download_image(exec_path, driver, image=0):
203
  print(f"\n{tempDLAttr.split('?')[0]}")
204
 
205
  img_loc = f"./{exec_path.folder_path}/{tempDLName}"
206
- urllib.request.urlretrieve(
207
- tempDLAttr, f"./{exec_path.folder_path}/{tempDLName}"
208
- )
 
 
 
 
 
 
 
209
  if not image:
210
- image_locations.append(f"./{exec_path.folder_path}/{tempDLName}")
211
  image_names.append(f"{tempDLName.split('.')[0]}")
212
  return img_loc
 
1
  import time
2
  import urllib.request
3
  import os
4
+ import aiohttp
5
+ import asyncio
6
  from random import randint
7
  from selenium.webdriver.common.by import By
8
  from selenium.webdriver.common.keys import Keys
 
13
  from commands.universal import searchQuery, save_Search, continue_Search, contains_works
14
  from ai.classifying_ai import img_classifier
15
 
16
+ async def getOrderedDanbooruImages(driver, user_search, num_pics, num_pages, filters, bl_tags, inc_tags, exec_path, imageControl):
17
  global image_locations, bl_tags_list, inc_tags_list, image_names, ai_mode,rating_filters
18
  image_names = imgList(mode=0)
19
  image_locations = []
 
25
  driver.get(link)
26
 
27
  # Rating Filter Creation
28
+ rating_filters = ["e"] if 2 in filters else []
29
+ rating_filters = ["s","e"] if 3 in filters else []
30
  rating_filters = ["q","s","e"] if 4 in filters else []
31
 
32
  # Tag list creation
33
  score = 1 if 0 in filters else 0
34
  match_type = 1 if 1 in filters else 0
35
+ r_18 = pg_lenient() if 2 in filters else []
36
  r_18 = pg_strict() if 3 in filters else r_18
37
+ ai_mode = 1 if 5 in filters else 0
38
 
39
  continue_search = 1 if imageControl else 0
40
 
 
58
 
59
  curr_page = driver.current_url
60
  while len(image_locations) < num_pics*num_pages:
61
+ await pages_to_search(driver, num_pages, num_pics, exec_path)
62
  if curr_page == driver.current_url and len(image_locations) < num_pics*num_pages:
63
  print("Reached end of search results")
64
  break
 
72
  return " order:score"
73
  return ""
74
 
75
async def pages_to_search(driver, num_pages, num_pics, exec_path):
    """Asynchronously scrape up to num_pages of search results.

    Each iteration waits for the thumbnail grid, hands its preview links to
    grid_search, and pages forward; it bails out when paging ends
    (valid_page) or the shared image_locations list hits the target count.
    """
    grid_xpath = '//*[@class="posts-container gap-2"]'
    for _ in range(num_pages):
        # Block until the post grid has rendered on the current page.
        WebDriverWait(driver, timeout=11).until(
            EC.presence_of_element_located((By.XPATH, grid_xpath))
        )
        previews = (
            driver.find_element(By.XPATH, grid_xpath)
            .find_elements(By.CLASS_NAME, "post-preview-link")
        )
        await grid_search(driver, num_pics, previews, exec_path, num_pages)
        save_Search(driver, mode=1)
        # Quit when there is no next page or the quota is already filled.
        if not valid_page(driver) or len(image_locations) >= num_pics * num_pages:
            break
86
 
87
+ async def grid_search(driver, num_pics, images, exec_path, num_pages):
 
88
  temp_img_len = len(image_locations)
89
  for n_iter, image in enumerate(images):
90
  if len(image_locations) >= num_pics*num_pages or len(image_locations) - temp_img_len >= num_pics:
 
104
 
105
  if ai_mode:
106
  checker = 0
107
+ image_loc = await download_image(exec_path=exec_path, driver=driver, image=image)
108
  if img_classifier(image_loc):
109
  print("AI Mode: I approve this image")
110
  else:
 
116
 
117
  driver, tempImg = tab_handler(driver=driver,image=image)
118
  WebDriverWait(driver, timeout=15).until(EC.presence_of_element_located((By.XPATH, '//*[@id="post-option-download"]/a')))
119
+ await download_image(exec_path=exec_path, driver=driver)
120
  driver = tab_handler(driver=driver)
121
 
122
  else:
 
194
  "covered_nipples", "thigh_focus", "thighs", "bikini", "swimsuit", "grabbing_another's_breast", "huge_breasts",
195
  "foot_focus", "licking_foot", "foot_worship", "shirt_lift","clothes_lift", "underwear", "panties_under_pantyhose"]
196
 
197
+ async def download_image(exec_path, driver, image=0):
198
  if not image:
199
  tempDL = driver.find_element(By.XPATH, '//*[@id="post-option-download"]/a')
200
  tempDLAttr = tempDL.get_attribute("href")
 
205
  print(f"\n{tempDLAttr.split('?')[0]}")
206
 
207
  img_loc = f"./{exec_path.folder_path}/{tempDLName}"
208
+
209
+ async with aiohttp.ClientSession() as session:
210
+ async with session.get(tempDLAttr) as resp:
211
+ with open(img_loc, 'wb') as fd:
212
+ while True:
213
+ chunk = await resp.content.read(1024)
214
+ if not chunk:
215
+ break
216
+ fd.write(chunk)
217
+
218
  if not image:
219
+ image_locations.append(img_loc)
220
  image_names.append(f"{tempDLName.split('.')[0]}")
221
  return img_loc