"""
ref: https://github.com/flute/coub-crawler
"""
import requests
from bs4 import BeautifulSoup
import os
from urllib import request
import time
import random
import copy

# Shared HTTP request headers used by every fetch in this module.
# The desktop-browser User-Agent avoids trivial bot blocking; note the
# dict is module-global, so mutating callers must copy it first.
headers={
    'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/2.0.0.11'
}


class CoubCrawler(object):
    """Crawler for coub.com hot timelines plus a best-effort image downloader.

    NOTE(review): ``homepage`` and ``res_encoding`` point at xslist.org and
    are not used by any visible method — presumably leftovers from an earlier
    crawler this file was adapted from; confirm before removing.
    """

    def __init__(self):
        self.homepage = 'https://xslist.org'
        self.res_encoding = 'utf8'
        # Number of timeline items requested per API page.
        self.per_page = 25

    def run(self, tag):
        """Entry point for crawling *tag*. Not yet implemented."""
        pass

    def get_request_url(self, tag):
        """Build the coub.com hot-timeline API URL for *tag*.

        :param tag: timeline tag/section name, e.g. ``'movie'``.
        :return: fully-formed API URL string.
        """
        url = 'https://coub.com/api/v2/timeline/hot/{}/half?per_page={}'.format(tag, self.per_page)
        return url

    def get_video_urls(self, page_url, next=None):
        """Fetch one timeline page and return the collected entries.

        :param page_url: API URL to request. (Bug fix: the original ignored
            this argument and always re-built the hard-coded ``'movie'`` URL.)
        :param next: optional pagination anchor, sent as an ``anchor`` request
            header. The name shadows the builtin ``next`` but is kept for
            backward compatibility with keyword callers.
        :return: list of extracted items. Currently always empty because
            response parsing is still TODO; the original returned ``None``
            and silently discarded its accumulator.
        """
        if next is not None:
            # Copy so the shared module-level headers dict is never mutated.
            current_headers = copy.deepcopy(headers)
            current_headers['anchor'] = next
        else:
            current_headers = headers

        data = []
        try:
            # Bug fix: honor the caller-supplied URL instead of calling
            # self.get_request_url('movie') again.
            response = requests.get(page_url, headers=current_headers)
            print(response)
            # TODO: parse response.json() and append entries to `data`.
            # for item in container.find_all('li'):
            #     href = item.a['href']
            #     url = '{}/{}'.format(self.homepage, href)
            #     title = item.a.h3.text
            #     date = item.a.span.text
            #     # set_info.append((url, title, date))
        except requests.RequestException as e:
            # Narrowed from bare Exception: only network/HTTP failures are
            # expected here; anything else should surface as a real bug.
            print('error: ' + str(e))
        return data

    def download_image(self, image_url, save_path):
        """Download *image_url* to *save_path*, best-effort.

        Errors are reported to stdout and swallowed so a batch run keeps
        going. The URL is downgraded https -> http before fetching —
        TODO confirm this workaround is still needed.
        """
        image_url = image_url.replace('https', 'http')
        try:
            req = request.Request(image_url, headers=headers)
            data = request.urlopen(req).read()
        except Exception as e:
            print('download error: ' + str(e))
            return
        # Only persist non-empty payloads.
        if data:
            with open(save_path, 'wb') as f:
                f.write(data)
        # Throttle between consecutive downloads.
        time.sleep(0.1)


if __name__ == '__main__':
    # Demo run: fetch the first hot-timeline page for the 'movie' tag.
    crawler = CoubCrawler()
    crawler.get_video_urls(crawler.get_request_url('movie'))
