# -*- coding:utf-8 -*-
import re
from urllib.parse import urljoin

from bs4 import BeautifulSoup


class HtmlParser(object):
    """Extracts links, gif image URLs and page metadata from daweijita.com HTML."""

    def __init__(self):
        pass

    @staticmethod
    def get_all_path(html_cont):
        """Return a list of daweijita.com links found in *html_cont*.

        Anchors are kept only when their href points at a daweijita.com
        subdomain and does not end in ``.gif``.  Tracking/utility path
        fragments (``#...``, ``/go/``, ``/guitar_friend``, ``/user/``,
        ``/author``, ``login?...``) are stripped from each kept href.
        Returns None for empty/falsy input.
        """
        if not html_cont:
            return
        soup = BeautifulSoup(html_cont, 'html.parser')
        # BUG FIX: the original pattern ended in '[^.gif]$', a character
        # class that only excluded a final '.', 'g', 'i' or 'f'; a negative
        # lookahead expresses the real intent ("does not end with .gif").
        # Dots in the domain are now escaped and the pattern is a raw string.
        href_re = re.compile(r'^http://\w+\.daweijita\.com/(?!\S*\.gif$)\S+$')
        # BUG FIX: 'login?' made the 'n' optional; the literal '?' of a
        # query string must be escaped.
        strip_re = re.compile(
            r'#\S*|/go/\S*|/guitar_friend\S*|/user/\S*|/author\S*|login\?\S*')
        links = []
        for link in soup.find_all('a', href=href_re):
            links.append(strip_re.sub('', link['href']))
        return links

    @staticmethod
    def get_all_image(html_cont):
        """Return the src URL of every daweijita.com gif <img> in *html_cont*.

        Returns None for empty/falsy input.
        """
        if not html_cont:
            return
        soup = BeautifulSoup(html_cont, 'html.parser')
        # Raw string; dots escaped so '.' no longer matches any character.
        src_re = re.compile(
            r'^http://\w+\.daweijita\.com/[0-9]+/[0-9]+/\w+_\S+\.gif')
        return [img['src'] for img in soup.find_all('img', src=src_re)]

    @staticmethod
    def get_path_and_name(html_cont):
        """Return [{'title': ..., 'url': ...}] for every <td> containing an <a>."""
        soup = BeautifulSoup(html_cont, 'html.parser')
        data_list = []
        for td in soup.find_all('td'):
            if td.a:
                # Prefer the inner <span>'s text when the anchor wraps one.
                title_tag = td.a.span if td.a.span is not None else td.a
                data_list.append({'title': title_tag.get_text(),
                                  'url': td.a['href']})
        return data_list

    def get_vip_gif(self, soup):
        """Return the src of the first 'alignnone' element, or None if absent."""
        image = soup.find(class_='alignnone')
        return image['src'] if image else None

    def get_all_gif(self, soup):
        """Return the src URLs of all 'fufei' highslide images in *soup*.

        BUG FIX: the original computed the tag list, printed it and fell off
        the end (returning None), so parse() always yielded (False, None);
        the srcs are now collected and returned.  ``.get`` is used so a tag
        without a src attribute contributes None instead of raising.
        """
        images = soup.find_all(class_='highslide-image', alt=re.compile('fufei'))
        return [img.get('src') for img in images]

    def parse(self, html_cont):
        """Dispatch on page type.

        Returns (True, vip_gif_src) when the page has a 'content-header'
        element, otherwise (False, list_of_gif_srcs).  Returns None when
        *html_cont* is None.
        """
        if html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser')
        if soup.find(id='content-header'):
            return True, self.get_vip_gif(soup)
        return False, self.get_all_gif(soup)

    def _get_new_urls(self, page_url, soup):
        """Return the set of absolute daweijita.com URLs linked from *soup*.

        BUG FIX: the original called the undefined name ``urlparse``
        (NameError on every invocation) and returned *page_url* instead of
        the collected set; it now resolves each href against *page_url*
        with ``urljoin`` and returns the set.
        """
        new_urls = set()
        for link in soup.find_all('a', href=re.compile(r"http://www.daweijita.com/")):
            new_urls.add(urljoin(page_url, link['href']))
        return new_urls

    def _get_new_data(self, page_url, soup):
        """Return {'url': page_url, 'title': ...} scraped from the page's <td> anchors.

        The title of the LAST linked <td> wins, matching the original
        iteration order.
        """
        res_data = {'url': page_url, 'title': ''}
        for td in soup.find_all('td'):
            if td.a:
                # BUG FIX: consistent with get_path_and_name — always store
                # the extracted text, never the raw <span> Tag object.
                source = td.a.span if td.a.span is not None else td.a
                res_data['title'] = source.get_text()
        return res_data