import os
import requests
from bs4 import BeautifulSoup
import os
from urllib import request
import time
import random
import multiprocessing

# Spoofed desktop-browser User-Agent sent with every request so the site
# serves the normal HTML. NOTE(review): the string mixes Firefox/Gecko
# versions — presumably it only needs to look browser-like; confirm.
headers={'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/2.0.0.11'}


class JavGuru(object):
    """Crawler for the jav.guru actress-list pages.

    Fetches paginated listing pages, walks the anchors inside the
    'actress-box' container, and can download individual images to disk.
    """

    def __init__(self):
        # self.homepage = HOMEPAGE
        self.res_encoding = 'utf8'  # encoding forced onto each HTTP response
        self.max_pages = 275        # last listing page to visit (inclusive)

    def get_page_url(self, idx):
        """Return the listing URL for 1-based page index ``idx``.

        Page 1 has no ``/page/N/`` suffix; pages >= 2 do.
        """
        if idx >= 2:
            url = 'https://jav.guru/jav-actress-list/page/{}/'.format(idx)
        else:
            url = 'https://jav.guru/jav-actress-list/'
        return url

    def run(self, save_root):
        """Crawl every listing page, saving results under ``save_root``."""
        # FIX: range(1, max_pages) silently skipped the final page;
        # make the upper bound inclusive so all max_pages pages are visited.
        for i in range(1, self.max_pages + 1):
            page_url = self.get_page_url(i)
            self.crawl_page(page_url, save_root)

    def crawl_page(self, url, save_folder):
        """Fetch one listing page and iterate its actress anchors.

        NOTE(review): still debug scaffolding — it prints the first anchor
        and stops; the commented-out lines below show the intended
        extraction. ``save_folder`` is currently unused here.
        """
        try:
            # FIX: added a timeout so a stalled server cannot hang the
            # crawl forever (requests has no default timeout).
            response = requests.get(url, headers=headers, timeout=30)
            response.encoding = self.res_encoding
            soup = BeautifulSoup(response.text, features='lxml')
            # (removed a debug print(soup) that dumped the whole page HTML)
            container = soup.find(attrs={'class': 'actress-box'})
            if container is None:
                # FIX: soup.find returns None on a miss; the old code then
                # raised AttributeError on container.find_all.
                print('error: actress-box container not found: ' + url)
                return
            for item in container.find_all('a'):
                print(item)
                break
                # href = item.a['href']
                # href = '{}/{}'.format(self.homepage, href)
                # title = item.a['title']
                # date = item.a.i.text
                # set_info.append((href, title, date))
        except Exception as e:
            print('error: ' + str(e))

    def download_image(self, image_url, save_path):
        """Download ``image_url`` and write the raw bytes to ``save_path``.

        Best-effort: network errors are logged and swallowed.
        """
        # NOTE(review): https is deliberately downgraded to http —
        # presumably to dodge certificate problems; confirm before changing.
        image_url = image_url.replace('https', 'http')
        try:
            req = request.Request(image_url, headers=headers)
            # FIX: added a timeout so a dead host cannot block indefinitely.
            data = request.urlopen(req, timeout=30).read()
        except Exception as e:
            print('download error: ' + str(e))
            return
        # Only persist non-empty payloads; skip zero-byte responses.
        if len(data) > 0:
            with open(save_path, 'wb') as f:
                f.write(data)
        time.sleep(0.1)  # throttle between downloads to be polite

def run():
    """Ad-hoc entry point: crawl a single hard-coded listing page."""
    save_root = '/Users/liyu/Desktop/data/javface/images/javguru'
    page_url = 'https://jav.guru/jav-actress-list/page/3/'
    JavGuru().crawl_page(page_url, save_root)


# Script entry point: runs the single-page test crawl when executed directly.
if __name__ == '__main__':
    run()
