import os
import requests
from bs4 import BeautifulSoup
import os
from urllib import request
import time
import random
import multiprocessing
from PIL import Image

# Browser-like User-Agent header sent with every request so the site serves
# normal pages to this script (shared by both requests and urllib calls).
headers={'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/2.0.0.11'}


class JavDatabase(object):
    """Crawler for idol images on javdatabase.com.

    Walks the paginated idol index, extracts each idol's thumbnail image
    URL, rewrites it to the full-size variant, and downloads it to disk.
    """

    def __init__(self):
        # Encoding forced onto HTML responses before parsing.
        self.res_encoding = 'utf8'
        self.homepage = 'https://www.javdatabase.com/idols/page/2/'
        # Total number of index pages to crawl (inclusive, 1-based).
        self.max_pages = 1519

    def get_page_url(self, idx):
        """Return the listing URL for 1-based page index ``idx``.

        Page 1 has no ``/page/N`` suffix on this site.
        """
        if idx == 1:
            url = 'https://www.javdatabase.com/idols/'
        else:
            url = 'https://www.javdatabase.com/idols/page/{}'.format(idx)
        return url

    def run(self, save_root):
        """Crawl every index page sequentially, saving images under ``save_root``.

        Fix: the upper bound is now ``max_pages + 1`` — the original
        ``range(1, self.max_pages)`` silently skipped the last page
        (``run_mp`` already used the inclusive range).
        """
        for i in range(1, self.max_pages + 1):
            page_url = self.get_page_url(i)
            self.crawl_page(page_url, save_root)

    def crawl_page(self, url, save_folder):
        """Fetch one listing page and download every not-yet-saved image.

        Any error (network, parse, missing container) is logged and
        swallowed so one bad page does not abort a long crawl.
        """
        try:
            response = requests.get(url, headers=headers)
            response.encoding = self.res_encoding
            soup = BeautifulSoup(response.text, features='lxml')
            container = soup.find(attrs={'class': 'facetwp-template'})
            for item in container.find_all(attrs={'class': 'idol-thumb'}):
                img_url = item.a.img['src']
                # File name is taken from the *thumb* URL, so the existence
                # check below also dedupes against earlier thumb downloads.
                image_name = img_url.split('/')[-1]
                image_path = os.path.join(save_folder, image_name)
                # Switch the URL to the full-resolution variant.
                img_url = img_url.replace('thumb', 'full')
                if not os.path.exists(image_path):
                    print('downloading: {}'.format(img_url))
                    self.download_image(img_url, image_path)
        except Exception as e:
            print('error: ' + str(e))

    def download_image(self, image_url, save_path):
        """Download ``image_url`` to ``save_path``; log and return on failure."""
        try:
            req = request.Request(image_url, headers=headers)
            data = request.urlopen(req).read()
        except Exception as e:
            print('download error: ' + str(e))
            return
        # Only write a file when something was actually received.
        if len(data) > 0:
            with open(save_path, 'wb') as f:
                f.write(data)
        # Small pause to throttle requests to the server.
        time.sleep(0.1)

    def download_webp(self, image_url, save_path):
        """Download a webp image; identical behavior to ``download_image``.

        The original body was a verbatim copy of ``download_image``;
        delegate instead so the logic lives in one place.
        """
        self.download_image(image_url, save_path)


def run():
    """Single-process entry point: crawl every index page into a fixed folder."""
    target_dir = '/Users/liyu/Desktop/data/javface/images/javdatabase'
    JavDatabase().run(target_dir)

def proc_func(crawler, save_folder, indices):
    """Worker body: crawl each page index in ``indices`` into ``save_folder``."""
    for page_idx in indices:
        crawler.crawl_page(crawler.get_page_url(page_idx), save_folder)

def run_mp(nprocs=8):
    """Crawl all index pages using ``nprocs`` worker processes.

    Pages are assigned round-robin (page ``i`` goes to worker ``i % nprocs``)
    and each worker visits its batch in reverse order, matching the
    original behavior.

    Fixes: removed the unused ``step`` variable, and the workers are now
    joined so the function does not return while children are still running.
    """
    crawler = JavDatabase()
    save_root = '/Users/liyu/Desktop/data/javface/images/javdatabase_full'
    if not os.path.exists(save_root):
        os.makedirs(save_root)
    npages = crawler.max_pages
    # Round-robin assignment of page indices 1..npages to workers.
    batches = [[] for _ in range(nprocs)]
    for i in range(1, npages + 1):
        batches[i % nprocs].append(i)
    procs = []
    for batch in batches:
        # Materialize the reversed batch so it pickles cleanly under the
        # 'spawn' start method (macOS default).
        p = multiprocessing.Process(
            target=proc_func, args=(crawler, save_root, list(reversed(batch))))
        p.start()
        procs.append(p)
    # Wait for every worker to finish before returning.
    for p in procs:
        p.join()


if __name__ == '__main__':
    # run()  # single-process alternative; kept for manual debugging
    run_mp()
