from urllib.request import urlopen
import urllib
import requests
from pathlib import Path
import urllib3


class ScrolllerHandler:
    """Client for the Scrolller GraphQL API.

    Builds a `discoverSubreddits` query payload, fetches batches of
    subreddits, and downloads the first picture of each one.

    NOTE(review): the hard-coded `authorization` JWT carries an `exp`
    claim and will eventually expire — confirm it is refreshed or
    replaced somewhere upstream.
    """

    def __init__(self, nsfw=True, max_chunk=100, limit=30):
        """Prepare the GraphQL endpoint URL and request payload.

        :param nsfw: when True, restrict discovery to NSFW subreddits
            (injects ``isNsfw: true`` into the query).
        :param max_chunk: number of discovery rounds ``saveAllImages``
            performs.
        :param limit: subreddits requested per discovery round.
        """
        nsfw_params = 'isNsfw: true' if nsfw else ''
        self.max_chunk = max_chunk
        self.url = 'https://api.scrolller.com/api/v2/graphql'
        self.params = {
            'authorization': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJjX2lkIjoiOTAzNTBhYjItY2QwMy00YzVmLTgzMDEtM'+
                            'DU4MTdjZGVmMWJhIiwiZXhwIjoxNjc0NDg1MzA3fQ.22BsNDlzOOqIcsvn-bVmFmJUAnFHj-bCsCObQKl2GF0',
            'query': '  query DiscoverSubredditsQuery( $filter: MediaFilter $limit: Int $iterator: String ) '
                     '{ discoverSubreddits( '+nsfw_params+' filter: $filter limit: $limit iterator: $iterator ) '
                     '{ iterator items { __typename id url title secondaryTitle description createdAt isNsfw subscribers isComplete ' +
                     'itemCount videoCount pictureCount albumCount isPaid username tags banner { url width height isOptimized } ' +
                     'isFollowing children( limit: 2 iterator: null filter: PICTURE disabledHosts: null homePage: true ) ' +
                     '{ iterator items { __typename id url title subredditId subredditTitle subredditUrl redditPath isNsfw ' +
                     'albumUrl hasAudio fullLengthSource gfycatSource redgifsSource ownerAvatar username ' +
                     'displayName isPaid tags isFavorite mediaSources { url width height isOptimized } blurredMediaSources' +
                     ' { url width height isOptimized } } } } } } ',
            'variables': {
                'limit': limit,
                'filter': 'PICTURE',
                'hostsDown': 'null',
                'iterator': 'NjYxN183ODQ3'
            }
        }

    def getImagesUrlList(self):
        """Run one discovery query and return a list of image URLs.

        For each discovered subreddit, take the URL of the first media
        source of its first child picture. Subreddits with missing or
        empty `children`/`mediaSources` are skipped instead of raising
        (the previous version crashed with AttributeError/IndexError on
        such entries).

        :returns: list of image URL strings (possibly empty).
        :raises requests.HTTPError: when the GraphQL POST fails.
        """
        req = requests.post(url=self.url, json=self.params)
        req.raise_for_status()  # fail loudly on a non-2xx response
        data = req.json()

        items = ((data.get('data') or {}).get('discoverSubreddits') or {}).get('items') or []
        image_list = []
        for item in items:
            children = (item.get('children') or {}).get('items') or []
            if not children:
                continue  # subreddit returned no child pictures
            sources = children[0].get('mediaSources') or []
            if not sources:
                continue  # child has no downloadable media
            url = sources[0].get('url')
            if url:
                image_list.append(url)
        return image_list

    def saveAllImages(self, path):
        """Download `max_chunk` rounds of discovered images into `path`.

        :param path: destination directory (string or Path). Created if
            it does not exist. Joining is done with pathlib, so a
            trailing separator is no longer required (the previous
            string concatenation silently mangled the filename without
            one).
        """
        target_dir = Path(path)
        target_dir.mkdir(parents=True, exist_ok=True)
        http = urllib3.PoolManager()
        for _ in range(self.max_chunk):
            for image in self.getImagesUrlList():
                resp = http.request("GET", image)
                if resp.status == 200:
                    print(image)
                    # Reuse the bytes already fetched for the status
                    # check — the previous version called urlretrieve
                    # here and downloaded every file a second time.
                    (target_dir / Path(image).name).write_bytes(resp.data)