import requests
from bs4 import BeautifulSoup
import os
from urllib import request
import time
import random

# Desktop-browser User-Agent sent with every request (some hosts reject
# the default python-requests/urllib agent string).
headers = {
    'User-Agent': (
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; Win64; x64; rv:60.0)'
        ' Gecko/20100101 Firefox/2.0.0.11'
    ),
}


class Crawler(object):
    """Scrapes idol profile pages from xslist.org and downloads images.

    Network failures are reported to stdout rather than raised, so a batch
    run is not aborted by a single bad page.
    """

    def __init__(self):
        # Example profile page: https://xslist.org/en/model/92094.html
        self.homepage = 'https://xslist.org'
        # Encoding forced onto responses before parsing.
        self.res_encoding = 'utf8'

    def get_idol_page_url(self, idol_index):
        """Return the Chinese-language profile URL for numeric id *idol_index*."""
        return '{}/zh/model/{}.html'.format(self.homepage, idol_index)

    def run_actress(self, start=1, stop=-1):
        """Placeholder for batch-crawling idol ids from *start* to *stop*."""
        pass

    def download_idol(self, idol_index):
        """Fetch the profile page for *idol_index* and print the parsed soup.

        Any exception (network, parse) is caught and printed, never raised.
        """
        idol_url = self.get_idol_page_url(idol_index)
        print(idol_url)
        try:
            # Bug fix: the original created a session but then bypassed it
            # with requests.get() and set a non-existent `keep_alive`
            # attribute.  Use the session properly and close it via `with`.
            with requests.Session() as s:
                response = s.get(idol_url, headers=headers)
            response.encoding = self.res_encoding
            soup = BeautifulSoup(response.text, features='lxml')
            print(soup)
        except Exception as e:
            print('error: ' + str(e))
        return

    def download_image(self, image_url, save_path):
        """Download *image_url* to *save_path*; skip files that already exist.

        Errors are printed and swallowed (best-effort download).
        """
        if os.path.exists(save_path):
            return  # already downloaded
        # Downgrade to plain HTTP before fetching.
        # NOTE(review): assumes the host serves identical content over
        # http — confirm; this also rewrites 'https' anywhere in the URL.
        image_url = image_url.replace('https', 'http')
        try:
            req = request.Request(image_url, headers=headers)
            data = request.urlopen(req).read()
        except Exception as e:
            print('download error: ' + str(e))
            return
        # Only write non-empty payloads so a failed fetch never creates an
        # empty file (which would be skipped forever by the exists() check).
        if len(data) > 0:
            with open(save_path, 'wb') as f:
                f.write(data)


def download_url():
    """Fetch a hard-coded javmodel.com page and print the parsed soup.

    Any exception (network, parse) is caught and printed, never raised.
    Always returns None.
    """
    # Bug fix: the original URL literal had trailing garbage
    # ('            ................. .') that made the request invalid.
    url = 'https://javmodel.com/jav/ai-asakura/'
    try:
        # Use the session for the request and close it deterministically
        # (the original created a session but bypassed it with requests.get
        # and set a non-existent `keep_alive` attribute).
        with requests.Session() as s:
            response = s.get(url, headers=headers)
        # Bug fix: the original read self.res_encoding, but this is a
        # module-level function with no `self` -> guaranteed NameError,
        # silently swallowed by the broad except below.
        response.encoding = 'utf8'
        soup = BeautifulSoup(response.text, features='lxml')
        print(soup)
    except Exception as e:
        print('error: ' + str(e))
    return


if __name__ == '__main__':
    # NOTE(review): the class in this file is named Crawler, not XSList —
    # the commented-out example below appears to predate a rename; verify
    # before re-enabling it.
    # crawler = XSList()
    # crawler.download_idol(92094)
    download_url()
