# -*- coding: utf-8 -*-
import os
import urllib2
from optparse import OptionParser
from html.parser import HTMLParser


class RedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that disables automatic HTTP 301/302 following.

    NOTE(review): returning ``None`` from the ``http_error_30x`` hooks
    presumably makes ``urlopen`` surface the redirect as an ``HTTPError``
    instead of following it — confirm against the urllib2 handler chain.
    Also note this class is defined but never installed via
    ``urllib2.build_opener`` anywhere in the visible code.
    """

    def __init__(self):
        # Deliberately skips the parent initializer; no per-instance
        # state is needed to suppress redirects.
        pass

    def http_error_301(self, req, fp, code, msg, headers):
        # Return None: do not follow permanent redirects.
        pass

    def http_error_302(self, req, fp, code, msg, headers):
        # Return None: do not follow temporary redirects.
        pass


class Bt15HtmParser(HTMLParser):
    """Collect attribute values from HTML elements matching a set of criteria.

    For every ``tag`` element the parser remembers the value of ``attr_key``;
    the value is recorded into ``links`` once the element also satisfies the
    optional text filter ``data`` and the extra attribute filters ``kwargs``.
    """

    def __init__(self, tag, attr_key, data, first_end=False, **kwargs):
        HTMLParser.__init__(self)
        # Matching criteria.
        self.target_tag = tag            # element name to look for
        self.target_attr_key = attr_key  # attribute whose value is collected
        self.target_data = data          # required text content (None = any)
        self.target_attrs = kwargs       # extra attribute name/value filters
        # Collected attribute values.
        self.links = set()
        # Per-element matching state, reset on every start tag.
        self.match_attrs_num = 0
        self.match_value = None
        # When True, record on the first text node instead of at the end tag.
        self.first_end = first_end

    def clear_links(self):
        """Discard everything collected so far."""
        self.links.clear()

    def _match_one(self, attr_key, attr_value):
        """Return True when (attr_key, attr_value) satisfies one extra filter."""
        if not self.target_attrs:
            return False
        return attr_key in self.target_attrs \
            and self.target_attrs[attr_key] == attr_value

    def handle_starttag(self, tag, attrs):
        # Every new element resets the per-element matching state.
        self.match_value = None
        self.match_attrs_num = 0
        if tag != self.target_tag:
            return
        for key, value in attrs:
            if key == self.target_attr_key:
                self.match_value = value
            elif self._match_one(key, value):
                self.match_attrs_num += 1

    def handle_data(self, data):
        # When a text filter is set, any non-matching text voids the element.
        if self.target_data is not None and data != self.target_data:
            self.match_value = None
        if self.first_end and self.match_value is not None:
            # Eager mode: record as soon as text follows the start tag.
            self.links.add(self.match_value)
            self.match_value = None

    def handle_endtag(self, tag):
        # NOTE(review): fires on any closing tag, not only target_tag; the
        # state is cleared again by the next start tag, so in practice only
        # the innermost close after a match records a value.
        extras_ok = self.match_attrs_num == len(self.target_attrs)
        if extras_ok and self.match_value is not None:
            self.links.add(self.match_value)
        self.match_value = None


def handle_url(url):
    """Fetch *url* with a browser-like User-Agent and return the body.

    Returns the raw response body from ``urllib2.urlopen``.
    Raises ``urllib2.URLError`` / ``urllib2.HTTPError`` on failure.
    """
    request = urllib2.Request(url)
    # Some sites block the default urllib2 User-Agent; impersonate Chrome.
    request.add_header("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                                     "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36")

    response = urllib2.urlopen(request)
    try:
        return response.read()
    finally:
        # Fix: the original leaked the connection; always close the response.
        response.close()


def add_option(opt_parser=None):
    """Register the command-line options.

    Args:
        opt_parser: an ``optparse.OptionParser`` to register options on.
            Defaults to the module-level ``parser`` created in ``__main__``,
            so existing ``add_option()`` callers keep working.
    """
    if opt_parser is None:
        # Backward compatible: fall back to the global used by __main__.
        opt_parser = parser
    default_dir = os.path.abspath(".")
    default_url = "http://mi.15bt.info"
    opt_parser.add_option("-u", "--url", dest="url", default=default_url, help="input url")
    opt_parser.add_option("-d", "--dir", dest="dir", default=default_dir, help="output dir")


if __name__ == '__main__':
    parser = OptionParser()
    add_option()
    options, _ = parser.parse_args()
    print options

    base_html = handle_url(options.url)

    # 一级
    top_hp = Bt15HtmParser(tag="link", attr_key="href", data=None, rel="archives")
    top_hp.feed(base_html)
    print top_hp.links
    top_hp.close()

    # 二级
    base_hp = Bt15HtmParser(tag="base", attr_key="href", data=None, first_end=True)
    second_hp = Bt15HtmParser(tag="a", attr_key="href", data='''偷窥原创''')
    second_links = []
    for s_url in top_hp.links:
        html = handle_url(s_url)

        base_hp.clear_links()
        base_hp.feed(html)

        second_hp.clear_links()
        second_hp.feed(html)

        print base_hp.links
        print second_hp.links

        base_url = base_hp.links.pop()
        [second_links.append(base_url + x) for x in second_hp.links]

    second_hp.close()
    base_hp.close()

    # 三级
    for s_url in second_links:
        html = handle_url(s_url)
        print html




