#!/usr/bin/env python
# encoding: utf-8

import uuid
from icrawler import Crawler, Feeder, Parser, Downloader
from bs4 import BeautifulSoup
import logging
import requests
from six.moves.urllib.parse import urlparse
from service.service import HttpService


SEARCH_HOST_URL = 'https://justpicsplease.com/photos/{name}/{page}'


class JavHDFeeder(Feeder):
    """Feeder that pushes ``[user_id, url]`` pairs into the crawler queue."""

    def feed(self, urls):
        """Queue every ``[user_id, url]`` pair from *urls* for parsing."""
        for pair in urls:
            message = 'JAV Feeder put %d,%s in queue' % (pair[0], pair[1])
            self.logger.info(message)
            self.output(pair)


class JavHDParser(Parser):
    """Parser that extracts downloadable image URLs from a gallery page."""

    def parse(self, response):
        """Yield ``dict(file_url=...)`` tasks for every image in *response*.

        Two sources are scanned: thumbnail anchors (relative hrefs, joined
        onto the request URL) and ``itemprop="image"`` elements, whose hrefs
        appear to be protocol-relative (``//...``) since they get an
        ``https:`` prefix.
        """
        soup = BeautifulSoup(response.content, 'lxml')
        # Thumbnail anchors inside the gallery grid.  (The original built
        # this selector with a backslash-newline inside the string literal,
        # embedding a run of spaces; adjacent literals keep it clean.)
        image_links = soup.select('.pure-u-1-5.pure-u-xs-1-2 > '
                                  '.back-fhg > .thumb-fhg > a')
        # Fixed: this line was tab-indented (a TabError under Python 3 in a
        # space-indented file) and carried a stray trailing semicolon.
        media_links = soup.find_all(itemprop='image')

        for link in image_links:
            # hrefs here are relative to the page that was fetched.
            yield dict(file_url=response.request.url + link['href'])

        for link in media_links:
            # hrefs here are protocol-relative, so prefix the scheme.
            yield dict(file_url='https:' + link['href'])



class JavHDDownloader(Downloader):
    """Downloader that names files with UUIDs and reports them to the server."""

    # override download filename
    def get_filename(self, task, default_ext):
        """Build a unique ``<uuid4>.<ext>`` name for the task's file URL.

        The extension is taken from the URL path when it contains a dot,
        otherwise *default_ext* is used.
        """
        path = urlparse(task['file_url'])[2]
        if '.' in path:
            extension = path.split('.')[-1]
        else:
            extension = default_ext
        return '{}.{}'.format(uuid.uuid4(), extension)

    # override store_meta to store image info to server
    def store_meta(self, filename, user_id):
        """Record *filename* and *user_id* in the server DB.

        NOTE(review): relies on the module-level ``http_service`` created in
        the ``__main__`` section — valid only when this file runs as a script.
        """
        http_service.store_image(filename, user_id)


class JavHDCrawler(Crawler):
    """Crawler pre-wired with the JavHD feeder, parser and downloader."""

    def __init__(self, feeder_cls=JavHDFeeder, parser_cls=JavHDParser,
                 downloader_cls=JavHDDownloader, *args, **kwargs):
        """Forward the JavHD component classes to the base ``Crawler``."""
        super(JavHDCrawler, self).__init__(
            feeder_cls, parser_cls, downloader_cls, *args, **kwargs)


if __name__ == '__main__':
    # Set up logging for the script run.
    logging.basicConfig()
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    # HTTP service to communicate with the local server.
    http_service = HttpService()
    # Collected [user_id, gallery_url] pairs to feed into the crawler.
    urls = []

    # Get all the stars first.
    stars = http_service.get_stars()
    if isinstance(stars, list):
        for star in stars:
            name = star['name'].lower()
            user_id = star['id']
            # Paginate per star; the original kept one global counter that
            # was reset only in the failure branch — scoping it here is
            # equivalent and removes the cross-star coupling.
            page = 1
            while True:
                # Construct our search url.
                url = SEARCH_HOST_URL.format(name=name, page=page)
                r = requests.get(url)
                if r.status_code != 200:
                    # Non-200 means no more pages (or an error) for this star.
                    break
                logger.info('success get url: %s', url)
                page += 1
                soup = BeautifulSoup(r.content, 'lxml')
                site_links = soup.select('.gallery > a')
                # Extract every href and keep the ones we want.
                for link in site_links:
                    # .get() returns None for an <a> without href; plain
                    # subscripting would raise KeyError instead, so the
                    # original None check could never trigger.
                    href = link.get('href')
                    if href is not None and \
                            'japanhdv.com' in href and \
                            http_service.check_black_url(href):
                        urls.append([user_id, href])
    else:
        logger.warning('stars is empty')

    print("Total javhd url %d" % len(urls))
    jav_crawler = JavHDCrawler(
        feeder_threads=10,
        parser_threads=10,
        downloader_threads=10,
        storage={'backend': 'FileSystem',
                 'root_dir': '/var/www/blue/storage/app/image/'},
        log_level=logging.INFO)
    jav_crawler.crawl(feeder_kwargs=dict(urls=urls),
                      downloader_kwargs=dict(max_num=100000, min_size=None))
