#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""MorningLake, my crawler framework

Morning Lake is a lake in my college

For education only.
"""

from urllib.parse import urljoin

import bs4
import requests

def url2soup(url, headers=None, data=None, timeout=None):
    """Fetch *url* and return a ``bs4.BeautifulSoup`` of the decoded page.

    Issues a POST when *data* is given, otherwise a GET.

    Arguments:
        url {str} -- the URL to fetch

    Keyword Arguments:
        headers {dict} -- extra HTTP headers (default: {None}, meaning none)
        data {dict} -- form data; triggers a POST request (default: {None})
        timeout {float} -- passed through to requests (default: {None},
            i.e. no timeout, matching the previous behavior)

    Returns:
        bs4.BeautifulSoup -- the parsed document (lxml parser)
    """
    # Fixed: the old signature used a mutable default (headers={}), which is
    # shared between calls and can be silently mutated.
    if headers is None:
        headers = {}
    if data:
        response = requests.post(url, data=data, headers=headers, timeout=timeout)
    else:
        response = requests.get(url, headers=headers, timeout=timeout)
    # Servers often omit/mislabel the charset header (requests then falls back
    # to ISO-8859-1), so prefer the encoding declared inside the page content,
    # and fall back to requests' detected (apparent) encoding.
    encodings = requests.utils.get_encodings_from_content(response.text)
    encoding = encodings[0] if encodings else response.apparent_encoding
    decoded = response.content.decode(encoding, 'replace')
    return bs4.BeautifulSoup(decoded, "lxml")


class Crawler(object):
    """A toy example of crawling framework

    Only for teaching students with python
    """

    def __init__(self, url, tag='body'):
        """
        Arguments:
            url {str} -- url of the page to crawl

        Keyword Arguments:
            tag {str} -- name of the tag to search for links in; falsy means
                the whole document (default: {'body'})
        """
        self.url = url
        self.tag = tag
        # URLs already seen; subtracted from the links found by get_urls().
        self.visited = {url}

    def parse(self, *args, **kwargs):
        """Download and parse self.url; extra arguments go to url2soup."""
        self.__soup = url2soup(self.url, *args, **kwargs)

    @property
    def soup(self):
        """The parsed page (parse() must have been called first)."""
        return self.__soup

    def excluded(self):
        """URLs to drop from new_urls; override in subclasses to customize.

        Fixed: get_urls() always called self.excluded(), but no such method
        existed, so every call raised AttributeError.
        """
        return self.visited

    def _root_tag(self):
        """The tag links are extracted from (whole soup when self.tag is falsy)."""
        return self.soup.find(self.tag) if self.tag else self.soup

    def get_urls(self, check=True):
        """Collect hyperlinks found under the root tag.

        Keyword Arguments:
            check {True or callable} -- True keeps every link; a callable
                keeps only the links for which it returns a truthy value
                (default: {True})

        Returns:
            list -- the (absolutized) URLs found; also stored, minus
            self.excluded(), in the set self.new_urls
        """
        urls = []
        for a in self._root_tag().find_all('a'):
            if not a.has_attr('href'):
                continue
            url = a['href']
            # Skip the bare root, self-links and protocol-relative links.
            if url == '/' or url == self.url or url.startswith('//'):
                continue
            if check is True or check(url):
                # Resolve relative links against the page URL
                # (urljoin handles paths the old string concat got wrong).
                urls.append(url if url.startswith('http') else urljoin(self.url, url))
        self.new_urls = set(urls) - self.excluded()
        return urls

    def find(self, *args, **kwargs):
        """bs4 ``find`` scoped to the root tag.

        Fixed: the old body referenced the undefined name ``soup`` (NameError)
        and passed the soup object itself as the tag name.
        """
        return self._root_tag().find(*args, **kwargs)

    def find_all(self, *args, **kwargs):
        """bs4 ``find_all`` scoped to the root tag (same fix as find())."""
        return self._root_tag().find_all(*args, **kwargs)

    def run(self, check=True, op=print):
        """Apply *op* to every URL found on the (already parsed) page.

        Keyword Arguments:
            check {True or callable} -- forwarded to get_urls (default: {True})
            op {callable} -- action per URL (default: {print})
        """
        for url in self.get_urls(check):
            op(url)

    def search(self, is_goal, depth=10):
        """Breadth-first search for a URL satisfying *is_goal*.

        Arguments:
            is_goal {callable} -- predicate on a URL string

        Keyword Arguments:
            depth {int} -- maximum number of BFS levels to expand (default: {10})

        Returns:
            str or None -- the first matching URL, or None when none is found
        """
        if is_goal(self.url):
            return self.url
        old = {self.url}       # every URL seen so far (set: old code used a
                               # list, making ``set(new) - old`` a TypeError)
        front = [self.url]     # current BFS frontier
        for _ in range(depth):
            if not front:
                break
            new_front = []
            for f in front:
                crawler = Crawler(f)
                crawler.parse()  # fixed: get_urls() needs a parsed page
                new = set(crawler.get_urls()) - old
                old |= new
                for candidate in new:
                    if is_goal(candidate):
                        return candidate
                    new_front.append(candidate)
            front = new_front
        return None

if __name__ == '__main__':
    # Quick smoke test: fetch a single page and print every link it contains.
    crawler = Crawler(url='https://www.taobao.com/')
    crawler.parse()

    # Example of a URL filter that could be passed to run(check=...):
    # import re
    # rx = re.compile(r'.+/\d+\Z')
    # def check(url):
    #     return rx.match(url)

    crawler.run()

