#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from urllib import parse

from bs4 import BeautifulSoup


class HtmlResolver(object):
    """Parses a fetched HTML page into (new crawl URLs, image detail data)."""

    def resolver(self, url, htmlContent):
        """Parse *htmlContent* fetched from *url*.

        Returns a tuple ``(newUrls, newData)`` where ``newUrls`` is a set of
        follow-up URLs to crawl and ``newData`` is a dict describing the image
        on a photo detail page (or ``None`` for non-detail pages).
        Returns ``None`` when either argument is empty.
        """
        if not url or not htmlContent:
            return None

        # NOTE(review): content is assumed to be GBK-encoded bytes — confirm
        # against the fetcher; from_encoding is ignored for already-decoded str.
        soup = BeautifulSoup(htmlContent, 'html.parser', from_encoding="gbk")
        newUrls = self._getNewUrls(url, soup)
        newData = self._getNewData(url, soup)
        return newUrls, newData

    def _getNewUrls(self, url, soup):
        """Collect follow-up URLs from *soup*; *url* is the current page URL."""
        newUrls = set()

        # Absolute site links, e.g. http://www.mmonly.cc/...
        for link in soup.find_all('a', href=re.compile(r"http:\/\/www\.mmonly\.cc\/(.*)?")):
            newUrls.add(link["href"].rstrip('/'))

        # Relative pagination links such as href="288505_6.html"
        for link in soup.find_all('a', href=re.compile(r"\d+_\d+\.html")):
            relUrl = link["href"]
            pageId = relUrl.split("_")[0]
            # Rebuild an absolute URL by swapping the trailing
            # "<id>.html" / "<id>_<n>.html" segment of the current URL.
            candidate = re.sub(r"\/" + pageId + r"(_\d+)?\.html$", "/" + relUrl, url)
            # BUG FIX: compare by value, not identity. re.sub returns a new
            # object, so the original "url is not candidate" was always True
            # and the current page was re-queued as its own follow-up.
            if url != candidate:
                newUrls.add(candidate)

        return newUrls

    def _getNewData(self, url, soup):
        """Extract image data from a photo detail page.

        Returns a dict with keys ``title``, ``src``, ``page``, ``url`` and
        ``types`` (an int-keyed dict of breadcrumb category names), or
        ``None`` when the page is not a photo detail page.
        """
        photoObj = soup.find('div', class_="photo")
        if not photoObj:
            return None

        # Breadcrumb links give the category path of the image.
        types = photoObj.find('div', class_="topmbx").find_all(
            'a', href=re.compile(r"http:\/\/www\.mmonly\.cc\/(.*)+"))

        imgObj = soup.find('div', id="big-pic")
        if not imgObj:
            return None

        img = imgObj.find('img')

        # Current page number within the photo set (defaults to 1 when the
        # pagination widget or the highlighted entry is missing/empty).
        page = 1
        pageObj = soup.find('div', class_="pages")
        if pageObj:
            pageLiObj = pageObj.find('li', class_="thisclass")
            if pageLiObj:
                page = int(pageLiObj.find('a').text.strip())

        if types and img and img.get('src'):
            return {
                "title": img["alt"],
                "src": img["src"],
                "page": int(page),
                "url": url,
                # Keep the historical int-keyed dict shape for callers.
                "types": {i: t.text.strip() for i, t in enumerate(types)},
            }
        return None
