#!/usr/bin/env python3
#coding=utf-8
from WebGetImg import htmlGet
import re
from bs4 import BeautifulSoup

class yixiuba_1(htmlGet):
    """Scraper for the yixiuba.com 性感美女写真 category."""

    def useProxy(self):
        # Site is reachable directly; no proxy needed.
        return False

    def pageName(self):
        return 'yixiuba/性感美女写真'

    def getMainPageList(self):
        return ['http://yixiuba.com/xingganmeinvxiezhen/list_1_%d.html' % n for n in range(1, 5)]

    def getPageList(self, url):
        """Return [name, absolute_url, time] triples for the albums listed on *url*."""
        html = self.getHtml(url)
        reg = r'</span><a href="(.+?\.html)" class="title" target="_blank" title="(.+?)">'
        html_re = re.compile(reg)
        html_list = [[name, 'http://yixiuba.com' + path, self.getTimeNow()]
                     for path, name in html_re.findall(html)]

        # On the first index page, prepend newer "hidden" albums that are only
        # reachable by hopping through each album's navigation link.
        if url == self.getMainPageList()[0] and html_list:  # guard: page may list nothing
            HideWeb = self.getHideWeb(html_list[0][1])
            while HideWeb:
                html_list.insert(0, HideWeb)
                HideWeb = self.getHideWeb(HideWeb[1])
        return html_list

    def getHideWeb(self, url):
        """Return the [name, url, time] entry linked from the 'sxfy' nav block of
        *url*, or [] when the page has no such link."""
        html = self.getHtml(url)
        # Explicit parser: deterministic results across environments and no
        # bs4 "no parser specified" warning.
        soup = BeautifulSoup(html, 'html.parser')
        nav = soup.find('div', attrs={"class": "sxfy"})
        item = nav.find('li') if nav else None
        link = item.find('a') if item else None
        if link and link.has_attr('href'):
            return [link.text, 'http://yixiuba.com' + link['href'], self.getTimeNow()]
        return []

    def getSubPageList(self, subPageUrl):
        """Return *subPageUrl* plus the URLs of its numbered continuation pages."""
        subPageList = [subPageUrl, ]
        prefix = subPageUrl.replace(subPageUrl.split('/')[-1], '')
        html = self.getHtml(subPageUrl)
        # '\.' (was bare '.'): only a literal dot may precede 'html'.
        reg = r"<a href='(\d+_\d+\.html)'>"
        html_re = re.compile(reg)
        subPageList += [prefix + u for u in html_re.findall(html)]
        return subPageList

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the image URLs found on one album page."""
        html = self.getHtml(subSubPageUrl)
        reg = r'<img src="(.+?\/\d+\.jpg)" /></p>'
        img_re = re.compile(reg)
        return img_re.findall(html)

class yixiuba_2(yixiuba_1):
    """yixiuba 美腿丝袜图片 category; only the name and index URLs differ."""

    def pageName(self):
        return 'yixiuba/美腿丝袜图片'

    def getMainPageList(self):
        template = 'http://yixiuba.com/meituisiwatupian/list_2_%d.html'
        return [template % page for page in range(1, 5)]

class yixiuba_3(yixiuba_1):
    """yixiuba 美女自拍街拍 category; only the name and index URLs differ."""

    def pageName(self):
        return 'yixiuba/美女自拍街拍'

    def getMainPageList(self):
        template = 'http://yixiuba.com/shenghuomeinvzipai/list_3_%d.html'
        return [template % page for page in range(1, 5)]

class yixiuba_4(yixiuba_1):
    """yixiuba 模特明星美女 category; only the name and index URLs differ."""

    def pageName(self):
        return 'yixiuba/模特明星美女'

    def getMainPageList(self):
        template = 'http://yixiuba.com/qingchunmeinvxiezhen/list_4_%d.html'
        return [template % page for page in range(1, 18)]

class faceks(htmlGet):
    """Scraper for sexy.faceks.com."""

    def useProxy(self):
        return False

    def pageName(self):
        return 'faceks'

    def getMainPageList(self):
        return ['http://sexy.faceks.com/?page=%d' % n for n in range(1, 10)]

    def getPageList(self, url):
        """Return [name, url, date] triples for the posts listed on *url*."""
        html = self.getHtml(url)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')
        subSoupList = soup.findAll('div', attrs={"class": "m-post m-post-img "})
        for s_soup in subSoupList:
            t_name = s_soup.find('p').text
            date_soup = s_soup.find('a', attrs={'class': 'date'})
            html_list.append([t_name, date_soup['href'], date_soup.text])
        return html_list

    def getSubPageList(self, subPageUrl):
        # Posts are single pages; nothing to expand.
        return [subPageUrl, ]

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the image URLs found on one post page."""
        html = self.getHtml(subSubPageUrl)
        # '.+?' (was greedy '.+'): with several <img> tags on one line the
        # greedy form matched from the first src across to the last '.jpg'.
        reg = r'<img src="(.+?\.jpg)"/>'
        img_re = re.compile(reg)
        return img_re.findall(html)

class meizitu(htmlGet):
    """Scraper for www.meizitu.com; all requests go through the proxy."""

    def useProxy(self):
        return True

    def pageName(self):
        return 'meizitu'

    def getMainPageList(self):
        # The front page uses different markup from the numbered list pages;
        # getPageList branches on this URL.
        return ['http://www.meizitu.com/', ] + ['http://www.meizitu.com/a/list_1_%d.html' % n for n in range(2, 40)]

    def getPageList(self, url):
        """Return [name, url, time] triples for the albums listed on *url*."""
        html = self.getHtml(url, useProxy=True)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')

        if url == 'http://www.meizitu.com/':
            # Front page: albums live in div.postContent blocks.
            for s_soup in soup.findAll('div', attrs={"class": "postContent"}):
                name_soup = s_soup.find('a')
                html_list.append([name_soup['title'], name_soup['href'], self.getTimeNow()])
        else:
            # List pages: albums live in li.wp-item blocks.
            for s_soup in soup.findAll('li', attrs={"class": "wp-item"}):
                name_soup = s_soup.find('h3').a
                html_list.append([name_soup.text, name_soup['href'], self.getTimeNow()])
        return html_list

    def getSubPageList(self, subPageUrl):
        # Albums are single pages.
        return [subPageUrl, ]

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the image URLs found on one album page."""
        html = self.getHtml(subSubPageUrl, useProxy=True)
        reg = r'src="(.+?\/\d+\.jpg)"'
        img_re = re.compile(reg)
        return img_re.findall(html)

class nnn2015_1(htmlGet):
    """Scraper for www.2015mmm.com 偷窥自拍 category (accessed via proxy)."""

    def useProxy(self):
        return True

    def pageName(self):
        return 'nnn2015/偷窥自拍'

    def getMainPageList(self):
        return ['http://www.2015mmm.com/show/toukuizipai/list_1_%d.html' % n for n in range(1, 5)]

    def getPageList(self, url):
        """Return [name, url, date] triples for the posts listed on *url*."""
        html = self.getHtml(url, useProxy=True)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')
        for s_soup in soup.findAll('li'):
            # Not every <li> is a post entry; skip those lacking the expected
            # <span>/<a> children instead of crashing.
            try:
                data_soup = s_soup.span.extract()  # pull the date out of the title text
                t_time = data_soup.text
                t_name = s_soup.text
                t_url = 'http://2015mmm.com' + s_soup.a['href']
                html_list.append([t_name, t_url, t_time])
            except (AttributeError, TypeError, KeyError):
                pass
        return html_list

    def getSubPageList(self, subPageUrl):
        """Return *subPageUrl* plus the unique continuation pages from its pager."""
        subPageList = [subPageUrl, ]
        html = self.getHtml(subPageUrl, useProxy=True)
        soup = BeautifulSoup(html, 'html.parser')
        pager = soup.find('p', attrs={"align": "center"})
        if pager is None:
            return subPageList
        prefix = subPageUrl.replace(subPageUrl.split('/')[-1], '')
        # BUG FIX: the old code indexed the whole ResultSet with ['href'],
        # which always raised (and was silently swallowed), so continuation
        # pages were never collected. Walk each anchor instead.
        for link in pager('a'):
            t_url = link.get('href')
            if t_url and t_url != '#' and prefix + t_url not in subPageList:
                subPageList.append(prefix + t_url)
        return subPageList

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the image URLs found in the post body."""
        html = self.getHtml(subSubPageUrl, useProxy=True)
        soup = BeautifulSoup(html, 'html.parser')
        imgContent = soup.find('div', attrs={"class": "content"})
        if imgContent is None:  # guard: page without a content div
            return []
        return [i['src'] for i in imgContent('img') if i.has_attr('src')]

class nnn2015_2(nnn2015_1):
    """nnn2015 亚洲图片 category; inherits all scraping logic."""

    def pageName(self):
        return 'nnn2015/亚洲图片'

    def getMainPageList(self):
        url_fmt = 'http://www.2015mmm.com/show/yazhoutupian/list_2_%d.html'
        return [url_fmt % page for page in range(1, 5)]

class nnn2015_3(nnn2015_1):
    """nnn2015 欧美图区 category; inherits all scraping logic."""

    def pageName(self):
        return 'nnn2015/欧美图区'

    def getMainPageList(self):
        url_fmt = 'http://www.2015mmm.com/show/oumeituqu/list_3_%d.html'
        return [url_fmt % page for page in range(1, 5)]

class nnn2015_4(nnn2015_1):
    """nnn2015 丝袜美腿 category; inherits all scraping logic."""

    def pageName(self):
        return 'nnn2015/丝袜美腿'

    def getMainPageList(self):
        url_fmt = 'http://www.2015mmm.com/show/siwameitui/list_4_%d.html'
        return [url_fmt % page for page in range(1, 5)]

class nnn2015_5(nnn2015_1):
    """nnn2015 清纯唯美 category; inherits all scraping logic."""

    def pageName(self):
        return 'nnn2015/清纯唯美'

    def getMainPageList(self):
        url_fmt = 'http://www.2015mmm.com/show/qingchunweimei/list_24_%d.html'
        return [url_fmt % page for page in range(1, 5)]

class nnn2015_6(nnn2015_1):
    """nnn2015 明星淫乱 category; inherits all scraping logic."""

    def pageName(self):
        return 'nnn2015/明星淫乱'

    def getMainPageList(self):
        url_fmt = 'http://www.2015mmm.com/show/mingxingyinluan/list_6_%d.html'
        return [url_fmt % page for page in range(1, 5)]

class nnn2015_7(nnn2015_1):
    """nnn2015 卡通动漫 category; inherits all scraping logic."""

    def pageName(self):
        return 'nnn2015/卡通动漫'

    def getMainPageList(self):
        url_fmt = 'http://www.2015mmm.com/show/katongdongman/list_7_%d.html'
        return [url_fmt % page for page in range(1, 5)]

class k8mm_siwa(htmlGet):
    """Scraper for www.8kmm.com 丝袜美女 category."""

    def useProxy(self):
        return False

    def pageName(self):
        return '8kmm/丝袜美女'

    def getMainPageList(self):
        return ['http://www.8kmm.com/mm/siwa/List_%d.html' % n for n in range(1, 5)]

    def getPageList(self, url):
        """Return [name, url, time] triples for the albums listed on *url*."""
        html = self.getHtml(url)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')
        subSoupList = soup.findAll('a', attrs={'href': True, "target": "_self", 'title': True})
        for s_soup in subSoupList:
            html_list.append(
                [s_soup['title'], 'http://www.8kmm.com' + s_soup['href'], self.getTimeNow()])
        return html_list

    def getSubPageList(self, subPageUrl):
        # Albums are single pages.
        return [subPageUrl, ]

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the lazy-load image URLs ('original' attribute) of one album page."""
        html = self.getHtml(subSubPageUrl)
        img_list = []
        soup = BeautifulSoup(html, 'html.parser')
        imgSoupList = soup.find('div', attrs={'id': 'MeiPhoto'})
        try:
            # find() returns None when the div is absent; calling None then
            # raises TypeError (narrowed from the old blanket Exception).
            imgSoupList = imgSoupList('img')
        except TypeError as ex:
            print(ex)
            exit()
        for img_soup in imgSoupList:
            img_list.append(img_soup['original'])
        return img_list

class k8mm_mote(k8mm_siwa):
    """8kmm 美女模特 category; inherits all scraping logic."""

    def pageName(self):
        return '8kmm/美女模特'

    def getMainPageList(self):
        template = 'http://www.8kmm.com/mm/mote/List_%d.html'
        return [template % page for page in range(1, 5)]  # full site: range(1, 16)

class k8mm_mingxing(k8mm_siwa):
    """8kmm 美女明星 category; inherits all scraping logic."""

    def pageName(self):
        return '8kmm/美女明星'

    def getMainPageList(self):
        template = 'http://www.8kmm.com/mm/mingxing/List_%d.html'
        return [template % page for page in range(1, 5)]  # full site: range(1, 10)

class k8mm_qingchun(k8mm_siwa):
    """8kmm 清纯美女 category; inherits all scraping logic."""

    def pageName(self):
        return '8kmm/清纯美女'

    def getMainPageList(self):
        template = 'http://www.8kmm.com/mm/qingchun/List_%d.html'
        return [template % page for page in range(1, 5)]  # full site: range(1, 43)

class k8mm_xinggan(k8mm_siwa):
    """8kmm 性感美女 category; inherits all scraping logic."""

    def pageName(self):
        return '8kmm/性感美女'

    def getMainPageList(self):
        template = 'http://www.8kmm.com/mm/xinggan/List_%d.html'
        return [template % page for page in range(1, 5)]  # full site: range(1, 62)

class mn22_qcmv(htmlGet):
    """Scraper for www.22mn.net 清纯美女 category (accessed via proxy)."""

    def useProxy(self):
        return True

    def pageName(self):
        return '22mn/清纯美女'

    def getMainPageList(self):
        return ['http://www.22mn.net/qingchunmeinv/list_%d.html' % n for n in range(1, 4)]

    def getPageList(self, url):
        """Return [name, url, time] triples for the albums listed on *url*."""
        html = self.getHtml(url, useProxy=True)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')
        subSoupList = soup.findAll('a', attrs={'href': True, "target": "_blank", 'title': True})
        for s_soup in subSoupList:
            # Album links are the anchors that wrap a thumbnail image.
            if s_soup.img:
                html_list.append([s_soup['title'], s_soup['href'], self.getTimeNow()])
        return html_list

    def getSubPageList(self, subPageUrl):
        """Return *subPageUrl* plus the unique continuation pages from its pager."""
        html = self.getHtml(subPageUrl, useProxy=True)
        sub_list = [subPageUrl, ]
        prefix = subPageUrl.replace(subPageUrl.split('/')[-1], '')
        soup = BeautifulSoup(html, 'html.parser')
        pager = soup.find('div', attrs={'class': 'page'})
        for s_soup in pager('a'):
            t_url = s_soup.get('href')
            # Skip anchors without href, '#' placeholders, and duplicates
            # (membership test replaces the old index()/except dance).
            if t_url and t_url != '#' and prefix + t_url not in sub_list:
                sub_list.append(prefix + t_url)
        return sub_list

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the (single) image URL of one album page."""
        html = self.getHtml(subSubPageUrl, useProxy=True)
        soup = BeautifulSoup(html, 'html.parser')
        imgSoupList = soup.find('div', attrs={'class': 'picbox', 'id': True})
        try:
            # None when the div is missing -> AttributeError, handled below.
            imgSoup = imgSoupList.img
        except AttributeError as ex:
            print(ex)
            exit()
        return ['http://www.22mn.net' + imgSoup['src'], ]

class luolishidai_xiezhen(htmlGet):
    """Scraper for www.luolishidai.com 萝莉写真 category (accessed via proxy)."""

    def useProxy(self):
        return True

    def pageName(self):
        return '萝莉时代/萝莉写真'

    def getMainPageList(self):
        return ['http://www.luolishidai.com/luolixiezhen/list_2_%d.html' % n for n in range(1, 11)]

    def getPageList(self, url):
        """Return [name, url, time] triples for the albums listed on *url*."""
        html = self.getHtml(url, useProxy=True)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')
        mainSoup = soup.find('div', attrs={'class': 'imgList'})
        for s_soup in mainSoup.findAll('li'):
            # The last anchor in each item carries the title attribute.
            t_soup = s_soup.findAll('a')[-1]
            html_list.append(
                [t_soup['title'], 'http://www.luolishidai.com' + t_soup['href'], self.getTimeNow()])
        return html_list

    def getSubPageList(self, subPageUrl):
        """Return *subPageUrl* plus the unique continuation pages from its pager."""
        html = self.getHtml(subPageUrl, useProxy=True)
        sub_list = [subPageUrl, ]
        prefix = subPageUrl.replace(subPageUrl.split('/')[-1], '')
        soup = BeautifulSoup(html, 'html.parser')
        pager = soup.find('div', attrs={'class': 'page'})
        for s_soup in pager('a'):
            t_url = s_soup.get('href')
            # Skip anchors without href, '#' placeholders, and duplicates
            # (membership test replaces the old index()/except dance).
            if t_url and t_url != '#' and prefix + t_url not in sub_list:
                sub_list.append(prefix + t_url)
        return sub_list

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the (single) image URL of one album page."""
        html = self.getHtml(subSubPageUrl, useProxy=True)
        soup = BeautifulSoup(html, 'html.parser')
        imgSoupList = soup.find('div', attrs={'class': 'arcBody'})
        try:
            # None when the div is missing -> AttributeError, handled below.
            imgSoup = imgSoupList.find('img')
        except AttributeError as ex:
            print(ex)
            exit()
        return ['http://www.luolishidai.com' + imgSoup['src'], ]

class luolishidai_rihanluoli(luolishidai_xiezhen):
    """萝莉时代 日韩萝莉 category; inherits all scraping logic."""

    def pageName(self):
        return '萝莉时代/日韩萝莉'

    def getMainPageList(self):
        template = 'http://www.luolishidai.com/rihanluoli/list_4_%d.html'
        return [template % page for page in range(1, 13)]

class luolishidai_xingganluoli(luolishidai_xiezhen):
    """萝莉时代 性感萝莉 category; inherits all scraping logic."""

    def pageName(self):
        return '萝莉时代/性感萝莉'

    def getMainPageList(self):
        template = 'http://www.luolishidai.com/xingganluoli/list_1_%d.html'
        return [template % page for page in range(1, 12)]

class luolishidai_zhongguoluoli(luolishidai_xiezhen):
    """萝莉时代 中国萝莉 category; inherits all scraping logic."""

    def pageName(self):
        return '萝莉时代/中国萝莉'

    def getMainPageList(self):
        template = 'http://www.luolishidai.com/zhongguoluoli/list_6_%d.html'
        return [template % page for page in range(1, 13)]

class luolishidai_luoliyujie(luolishidai_xiezhen):
    """萝莉时代 萝莉御姐 category; inherits all scraping logic."""

    def pageName(self):
        return '萝莉时代/萝莉御姐'

    def getMainPageList(self):
        template = 'http://www.luolishidai.com/luoliyujie/list_7_%d.html'
        return [template % page for page in range(1, 5)]

class luolishidai_oumeiluoli(luolishidai_xiezhen):
    """萝莉时代 欧美萝莉 category; inherits all scraping logic."""

    def pageName(self):
        return '萝莉时代/欧美萝莉'

    def getMainPageList(self):
        template = 'http://www.luolishidai.com/oumeiluoli/list_5_%d.html'
        return [template % page for page in range(1, 3)]

class day9_meinv(htmlGet):
    """Scraper for www.9day.org 美女写真 category (accessed via proxy)."""

    def useProxy(self):
        return True

    def pageName(self):
        return '九天/美女写真'

    def getMainPageList(self):
        return ['http://www.9day.org/List/1-%d/' % n for n in range(1, 5)]  # full site: range(1, 162)

    def getPageList(self, url):
        """Return [name, url, date] triples for the albums listed on *url*."""
        html = self.getHtml(url, useProxy=True)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')
        mainSoup = soup.find('ul', attrs={'id': 'mainlistUL'})
        for s_soup in mainSoup.findAll('li', attrs={'class': 'mainlist_li'}):
            t_soup = s_soup.find('a')
            t_name = t_soup['title']
            t_url = 'http://www.9day.org' + t_soup['href']
            # Recent entries carry class 'newDate', older ones 'oldDate'.
            time_soup = s_soup.find('span', attrs='newDate')
            if not time_soup:
                time_soup = s_soup.find('span', attrs='oldDate')
            html_list.append([t_name, t_url, time_soup.text])
        return html_list

    def getSubPageList(self, subPageUrl):
        """Return *subPageUrl* plus the unique continuation pages from its pager."""
        html = self.getHtml(subPageUrl, useProxy=True)
        sub_list = [subPageUrl, ]
        # Pager hrefs are site-absolute, so join against the host root.
        prefix = 'http://www.9day.org'
        soup = BeautifulSoup(html, 'html.parser')
        pager = soup.find('div', attrs={'class': 'page'})
        for s_soup in pager('a'):
            t_url = s_soup.get('href')
            # Skip anchors without href, '#' placeholders, and duplicates
            # (membership test replaces the old index()/except dance).
            if t_url and t_url != '#' and prefix + t_url not in sub_list:
                sub_list.append(prefix + t_url)
        return sub_list

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the image URLs found in the album body."""
        html = self.getHtml(subSubPageUrl, useProxy=True)
        imgList = []
        soup = BeautifulSoup(html, 'html.parser')
        imgSoupList = soup.find('div', attrs={'class': 'mainNewsContent'})
        try:
            # None when the div is missing -> AttributeError, handled below.
            imgSoupList = imgSoupList.findAll('img')
        except AttributeError as ex:
            print(ex)
            exit()
        for imgSoup in imgSoupList:
            imgList.append(imgSoup['src'])
        return imgList

class day9_qingchun(day9_meinv):
    """9day 清纯美女 category; inherits all scraping logic."""

    def pageName(self):
        return '九天/清纯美女'

    def getMainPageList(self):
        url_fmt = 'http://www.9day.org/List/2-%d/'
        return [url_fmt % page for page in range(1, 5)]  # full site: range(1, 20)

class day9_jiepai(day9_meinv):
    """9day 街拍图片 category; inherits all scraping logic."""

    def pageName(self):
        return '九天/街拍图片'

    def getMainPageList(self):
        url_fmt = 'http://www.9day.org/List/3-%d/'
        return [url_fmt % page for page in range(1, 5)]  # full site: range(1, 29)

class day9_zhifu(day9_meinv):
    """9day 制服美女 category; inherits all scraping logic."""

    def pageName(self):
        return '九天/制服美女'

    def getMainPageList(self):
        url_fmt = 'http://www.9day.org/List/4-%d/'
        return [url_fmt % page for page in range(1, 5)]  # full site: range(1, 10)

class day9_siwa(day9_meinv):
    """9day 丝袜美腿 category; inherits all scraping logic."""

    def pageName(self):
        return '九天/丝袜美腿'

    def getMainPageList(self):
        url_fmt = 'http://www.9day.org/List/5-%d/'
        return [url_fmt % page for page in range(1, 5)]  # full site: range(1, 121)

class day9_zipai(day9_meinv):
    """9day 美女自拍 category; inherits all scraping logic."""

    def pageName(self):
        return '九天/美女自拍'

    def getMainPageList(self):
        url_fmt = 'http://www.9day.org/List/6-%d/'
        return [url_fmt % page for page in range(1, 5)]  # full site: range(1, 11)

class day9_yuzu(day9_meinv):
    """9day 美女玉足 category; inherits all scraping logic."""

    def pageName(self):
        return '九天/美女玉足'

    def getMainPageList(self):
        url_fmt = 'http://www.9day.org/List/7-%d/'
        return [url_fmt % page for page in range(1, 5)]

class day9_xiangche(day9_meinv):
    """9day 香车美女 category; inherits all scraping logic."""

    def pageName(self):
        return '九天/香车美女'

    def getMainPageList(self):
        url_fmt = 'http://www.9day.org/List/8-%d/'
        return [url_fmt % page for page in range(1, 5)]  # full site: range(1, 52)

class day9_mote(day9_meinv):
    """9day 美女模特 category; inherits all scraping logic."""

    def pageName(self):
        return '九天/美女模特'

    def getMainPageList(self):
        url_fmt = 'http://www.9day.org/List/9-%d/'
        return [url_fmt % page for page in range(1, 5)]  # full site: range(1, 70)

class yangyan_meinv(htmlGet):
    """Scraper for www.yangyanmeitu.com 养眼美女 category (accessed via proxy)."""

    def useProxy(self):
        return True

    def pageName(self):
        return '养眼美女/养眼美女'

    def getMainPageList(self):
        return ['http://www.yangyanmeitu.com/album-show-cid-1-page-%d.html' % n for n in range(1, 7)]

    def getPageList(self, url):
        """Return [name, url, time] triples for the albums listed on *url*."""
        html = self.getHtml(url, useProxy=True)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')
        mainSoup = soup.find('div', attrs={'class': 'leftList5'})
        for s_soup in mainSoup.findAll('li'):
            # The last anchor in each item carries the title attribute.
            t_soup = s_soup.findAll('a')[-1]
            html_list.append(
                [t_soup['title'], 'http://www.yangyanmeitu.com/' + t_soup['href'], self.getTimeNow()])
        return html_list

    def getSubPageList(self, subPageUrl):
        # Albums are single pages.
        return [subPageUrl, ]

    def getImgListInSubPage(self, subSubPageUrl):
        """Return full-size image URLs; thumbnail names drop the 'thumb_' prefix."""
        html = self.getHtml(subSubPageUrl, useProxy=True)
        imgList = []
        soup = BeautifulSoup(html, 'html.parser')
        imgSoupList = soup.find('div', attrs={'id': 'thumb'})
        try:
            # None when the div is missing -> AttributeError, handled below.
            imgSoupList = imgSoupList.findAll('img')
        except AttributeError as ex:
            print(ex)
            exit()
        for imgSoup in imgSoupList:
            imgList.append('http://www.yangyanmeitu.com/' + imgSoup['src'].replace('thumb_', ''))
        return imgList

class yangyan_qingchun(yangyan_meinv):
    """养眼美女 唯美清纯 category; inherits all scraping logic."""

    def pageName(self):
        return '养眼美女/唯美清纯'

    def getMainPageList(self):
        template = 'http://www.yangyanmeitu.com/album-show-cid-2-page-%d.html'
        return [template % page for page in range(1, 10)]

class yangyan_xinggan(yangyan_meinv):
    """养眼美女 性感诱惑 category; inherits all scraping logic."""

    def pageName(self):
        return '养眼美女/性感诱惑'

    def getMainPageList(self):
        template = 'http://www.yangyanmeitu.com/album-show-cid-3-page-%d.html'
        return [template % page for page in range(1, 19)]

class yangyan_bizhi(yangyan_meinv):
    """养眼美女 高清壁纸 category; inherits all scraping logic."""

    def pageName(self):
        return '养眼美女/高清壁纸'

    def getMainPageList(self):
        template = 'http://www.yangyanmeitu.com/album-show-cid-4-page-%d.html'
        return [template % page for page in range(1, 3)]

class yangyan_keai(yangyan_meinv):
    """养眼美女 气质可爱 category; inherits all scraping logic."""

    def pageName(self):
        return '养眼美女/气质可爱'

    def getMainPageList(self):
        template = 'http://www.yangyanmeitu.com/album-show-cid-9-page-%d.html'
        return [template % page for page in range(1, 15)]

class yangyan_zipai(yangyan_meinv):
    """养眼美女 MM自拍 category; inherits all scraping logic."""

    def pageName(self):
        return '养眼美女/MM自拍'

    def getMainPageList(self):
        template = 'http://www.yangyanmeitu.com/album-show-cid-11-page-%d.html'
        return [template % page for page in range(1, 7)]

class yangyan_siwa(yangyan_meinv):
    """养眼美女 美腿丝袜 category; inherits all scraping logic."""

    def pageName(self):
        return '养眼美女/美腿丝袜'

    def getMainPageList(self):
        template = 'http://www.yangyanmeitu.com/album-show-cid-12-page-%d.html'
        return [template % page for page in range(1, 5)]

class yangyan_qingcheng(yangyan_meinv):
    """养眼美女 倾国倾城 category; inherits all scraping logic."""

    def pageName(self):
        return '养眼美女/倾国倾城'

    def getMainPageList(self):
        template = 'http://www.yangyanmeitu.com/album-show-cid-7-page-%d.html'
        return [template % page for page in range(1, 4)]

class feiyang(htmlGet):
    """Scraper for the NetEase photo space feiyangimage.pp.163.com."""

    def useProxy(self):
        return False

    def pageName(self):
        return '飞扬'

    def getMainPageList(self):
        return ["http://feiyangimage.pp.163.com/folder/"]

    def getPageList(self, url):
        """Return [name, url, time] triples for the folders listed on *url*."""
        html = self.getHtml(url)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')
        # The last w-item was excluded by the original code as well
        # (presumably a navigation stub, not an album — TODO confirm).
        for s_soup in soup.findAll('li', attrs={'class': 'w-item'})[:-1]:
            t_soup = s_soup.find('a')
            html_list.append([t_soup['title'], t_soup['href'], self.getTimeNow()])
        return html_list

    def getSubPageList(self, subPageUrl):
        # Albums are single pages.
        return [subPageUrl, ]

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the lazy-load image URLs of one album page."""
        html = self.getHtml(subSubPageUrl)
        imgList = []
        soup = BeautifulSoup(html, 'html.parser')
        for pic_area in soup.findAll('div', attrs={'class': 'pic-area'}):
            # BUG FIX: find() returns None rather than raising, so the old
            # try/except around it never fired while the ['data-lazyload-src']
            # lookup outside it could crash on None. Guard explicitly instead.
            imgSoup = pic_area.find('img')
            if imgSoup and imgSoup.has_attr('data-lazyload-src'):
                imgList.append(imgSoup['data-lazyload-src'])
        return imgList

class iab_manhua(htmlGet):
    """Scraper for i-ab.org 少女漫画 category."""

    def useProxy(self):
        return False

    def pageName(self):
        return 'i-ab/少女漫画'

    def getMainPageList(self):
        return ['http://i-ab.org/a/shaonvmanhua/list_1_%d.html' % n for n in range(1, 7)]

    def getPageList(self, url):
        """Return [name, url, time] triples for the posts listed on *url*."""
        html = self.getHtml(url, useProxy=False)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')
        subSoupList = soup.findAll('a', attrs={'class': ['pic', ], 'title': True, 'href': True})
        for s_soup in subSoupList:
            html_list.append(
                [s_soup['title'], 'http://i-ab.org' + s_soup['href'], self.getTimeNow()])
        return html_list

    def getSubPageList(self, subPageUrl):
        """Return *subPageUrl* plus the unique continuation pages from its pager."""
        html = self.getHtml(subPageUrl, useProxy=False)
        sub_list = [subPageUrl, ]
        prefix = subPageUrl.replace(subPageUrl.split('/')[-1], '')
        soup = BeautifulSoup(html, 'html.parser')
        pager = soup.find('ul', attrs={'class': 'page_list'})
        for s_soup in pager.findAll('a', attrs={'href': True}):
            t_url = s_soup['href']
            # Skip '#' placeholders and duplicates (membership test replaces
            # the old index()/except dance).
            if t_url != '#' and prefix + t_url not in sub_list:
                sub_list.append(prefix + t_url)
        return sub_list

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the image URLs found in the post body."""
        html = self.getHtml(subSubPageUrl, useProxy=False)
        imgList = []
        soup = BeautifulSoup(html, 'html.parser')
        imgSoupList = soup.find('div', attrs={'class': 'article_body'})
        try:
            # None when the div is missing -> AttributeError, handled below.
            imgSoupList = imgSoupList.findAll('img')
        except AttributeError as ex:
            print(ex)
            exit()
        for imgSoup in imgSoupList:
            imgList.append(imgSoup['src'])
        return imgList

class iab_gif(iab_manhua):
    """i-ab 动态GIF category; inherits all scraping logic."""

    def pageName(self):
        return 'i-ab/gif'

    def getMainPageList(self):
        template = 'http://i-ab.org/a/dongtaiGIF/list_2_%d.html'
        return [template % page for page in range(1, 7)]

class iab_chunjie(iab_manhua):
    """i-ab 纯洁图片 category; inherits all scraping logic."""

    def pageName(self):
        return 'i-ab/纯洁图片'

    def getMainPageList(self):
        template = 'http://i-ab.org/a/chunjietupian/list_7_%d.html'
        return [template % page for page in range(1, 8)]


class cbcb084_1(htmlGet):
    """Scraper for www.cbcb084.com 美腿玉足 category."""

    def useProxy(self):
        return True

    def pageName(self):
        return 'cbcb084/美腿玉足'

    def getMainPageList(self):
        # First page has no numeric suffix; later pages are index22_N.html.
        # Site has 41 pages in total.
        return ['http://www.cbcb084.com/html/part/index22.html'] + ['http://www.cbcb084.com/html/part/index22_%d.html' % n for n in range(2, 6)]

    def getPageList(self, url):
        """Return [name, url, date] triples for the posts listed on *url*."""
        html = self.getHtml(url, useProxy=False)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')
        for s_soup in soup.findAll('table', attrs={'class': 'listt'}):
            # Skip malformed entries instead of crashing on missing tags
            # (narrowed from the old bare except).
            try:
                data_soup = s_soup.find('a')
                # The listing shows only month-day; prepend the current year.
                t_time = self.getTimeNow().split('-')[0] + '-' + s_soup.find('font').text
                t_name = data_soup.text
                t_url = 'http://www.cbcb084.com' + data_soup['href']
                html_list.append([t_name, t_url, t_time])
            except (AttributeError, TypeError, KeyError):
                pass
        return html_list

    def getSubPageList(self, subPageUrl):
        # Posts are treated as single pages; the pager-walking code that used
        # to live here had been deliberately disabled.
        return [subPageUrl, ]

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the URL of every <img> on the post page."""
        html = self.getHtml(subSubPageUrl, useProxy=False)
        soup = BeautifulSoup(html, 'html.parser')
        return [i['src'] for i in soup.findAll('img') if i.has_attr('src')]

class cbcb084_2(cbcb084_1):
    """cbcb084 清纯唯美 category; inherits all scraping logic."""

    def pageName(self):
        return 'cbcb084/清纯唯美'

    def getMainPageList(self):
        base = 'http://www.cbcb084.com/html/part/index23'
        pages = [base + '.html']
        pages += [base + '_%d.html' % page for page in range(2, 6)]  # 48 pages on site
        return pages

class cbcb084_3(cbcb084_1):
    """cbcb084 偷拍自拍 category; inherits all scraping logic."""

    def pageName(self):
        return 'cbcb084/偷拍自拍'

    def getMainPageList(self):
        base = 'http://www.cbcb084.com/html/part/index16'
        pages = [base + '.html']
        pages += [base + '_%d.html' % page for page in range(2, 6)]  # 71 pages on site
        return pages

class cbcb084_4(cbcb084_1):
    """cbcb084 卡通动漫 category; inherits all scraping logic."""

    def pageName(self):
        return 'cbcb084/卡通动漫'

    def getMainPageList(self):
        # BUG FIX: follow-up pages previously pointed at index16_%d.html (the
        # 偷拍自拍 section) — a copy-paste slip; every sibling class uses the
        # same index number for both the first and the numbered pages.
        base = 'http://www.cbcb084.com/html/part/index19'
        return [base + '.html'] + [base + '_%d.html' % n for n in range(2, 6)]  # 55 pages on site

class cbcb084_5(cbcb084_1):
    """cbcb084 欧美图片 category; inherits all scraping logic."""

    def pageName(self):
        return 'cbcb084/欧美图片'

    def getMainPageList(self):
        base = 'http://www.cbcb084.com/html/part/index18'
        pages = [base + '.html']
        pages += [base + '_%d.html' % page for page in range(2, 6)]  # 66 pages on site
        return pages

class cbcb084_6(cbcb084_1):
    """cbcb084 亚洲图片 category; inherits all scraping logic."""

    def pageName(self):
        return 'cbcb084/亚洲图片'

    def getMainPageList(self):
        base = 'http://www.cbcb084.com/html/part/index17'
        pages = [base + '.html']
        pages += [base + '_%d.html' % page for page in range(2, 6)]  # 68 pages on site
        return pages
		
class cbcb084_7(cbcb084_1):
    """cbcb084 乱伦性爱 category; inherits all scraping logic."""

    def pageName(self):
        return 'cbcb084/乱伦性爱'

    def getMainPageList(self):
        base = 'http://www.cbcb084.com/html/part/index20'
        pages = [base + '.html']
        pages += [base + '_%d.html' % page for page in range(2, 6)]  # 52 pages on site
        return pages
		
class cbcb084_8(cbcb084_1):
    """cbcb084 另类图片 category; inherits all scraping logic."""

    def pageName(self):
        return 'cbcb084/另类图片'

    def getMainPageList(self):
        base = 'http://www.cbcb084.com/html/part/index21'
        pages = [base + '.html']
        pages += [base + '_%d.html' % page for page in range(2, 6)]  # 48 pages on site
        return pages

class mmkao_beautyleg(htmlGet):
    """Scraper for www.mmkao.net, Beautyleg section. Subclasses override
    pageName()/getPrefix() (and getMainPageList for a different page count)."""

    def useProxy(self):
        return False

    def pageName(self):
        return 'mmkao/Beautyleg'

    def getMainPageList(self):
        # Consistency: build from getPrefix() like every subclass does,
        # instead of hard-coding 'Beautyleg' (same URLs either way).
        root = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        return [root] + [root + '%d.html' % n for n in range(2, 5)]  # 38 pages on site

    def getPrefix(self):
        # URL path segment of this category.
        return 'Beautyleg'

    def getPageList(self, url):
        """Return [name, url, time] triples for the albums listed on *url*."""
        html = self.getHtml(url, useProxy=False)
        html_list = []
        # Explicit parser: deterministic results and no bs4 warning.
        soup = BeautifulSoup(html, 'html.parser')
        for row_soup in soup.findAll('ul', attrs={'class': 'photo'}):
            for s_soup in row_soup.findAll('a'):
                t_url = 'http://www.mmkao.net/' + self.getPrefix() + '/' + s_soup['href']
                html_list.append([s_soup.text, t_url, self.getTimeNow()])
        return html_list

    def getSubPageList(self, subPageUrl):
        """Return *subPageUrl* plus the unique continuation pages from its pager."""
        html = self.getHtml(subPageUrl, useProxy=False)
        sub_list = [subPageUrl, ]
        prefix = subPageUrl.replace(subPageUrl.split('/')[-1], '')
        soup = BeautifulSoup(html, 'html.parser')
        pager = soup.find('ul', attrs={'class': 'image'})
        for s_soup in pager('a'):
            t_url = s_soup.get('href')
            # Skip anchors without href, '#' placeholders, and duplicates
            # (membership test replaces the old index()/except dance).
            if t_url and t_url != '#' and prefix + t_url not in sub_list:
                sub_list.append(prefix + t_url)
        return sub_list

    def getImgListInSubPage(self, subSubPageUrl):
        """Return the image URLs of one album page (stray carriage returns stripped)."""
        html = self.getHtml(subSubPageUrl, useProxy=False)
        imgList = []
        soup = BeautifulSoup(html, 'html.parser')
        imgSoupList = soup.find('ul', attrs={'class': 'file'})
        try:
            # None when the list is missing -> AttributeError, handled below.
            imgSoupList = imgSoupList.findAll('img')
        except AttributeError as ex:
            print(ex)
            exit()
        for imgSoup in imgSoupList:
            imgList.append(imgSoup['src'].replace('\r', ''))
        return imgList

class mmkao_rqstar(mmkao_beautyleg):
    """mmkao RQ-STAR category; inherits all scraping logic."""

    def pageName(self):
        return 'mmkao/RQ-STAR'

    def getMainPageList(self):
        root = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        return [root] + [root + '%d.html' % page for page in range(2, 5)]  # 26 pages on site

    def getPrefix(self):
        return 'RQ-STAR'

class mmkao_rosi(mmkao_beautyleg):
    """mmkao ROSI category; inherits all scraping logic."""

    def pageName(self):
        return 'mmkao/ROSI'

    def getMainPageList(self):
        root = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        return [root] + [root + '%d.html' % page for page in range(2, 5)]  # 40 pages on site

    def getPrefix(self):
        return 'ROSI'

class mmkao_ligui(mmkao_beautyleg):
    """mmkao ligui category; inherits all scraping logic."""

    def pageName(self):
        return 'mmkao/ligui'

    def getMainPageList(self):
        root = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        return [root] + [root + '%d.html' % page for page in range(2, 5)]  # 31 pages on site

    def getPrefix(self):
        return 'ligui'

class mmkao_4kstar(mmkao_beautyleg):
    """mmkao.net scraper for the 4K-Star category."""

    def pageName(self):
        return 'mmkao/4K-Star'

    def getPrefix(self):
        return '4K-Star'

    def getMainPageList(self):
        # Page 1 has no numeric suffix; pages 2..4 do.  # 12
        base = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        pages = [base]
        for page_no in range(2, 5):
            pages.append(base + '%d.html' % page_no)
        return pages

class mmkao_xiuren(mmkao_beautyleg):
    """mmkao.net scraper for the XiuRen category."""

    def pageName(self):
        return 'mmkao/XiuRen'

    def getPrefix(self):
        return 'XiuRen'

    def getMainPageList(self):
        # Page 1 has no numeric suffix; pages 2..4 do.  # 11
        base = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        pages = [base]
        for page_no in range(2, 5):
            pages.append(base + '%d.html' % page_no)
        return pages

class mmkao_disi(mmkao_beautyleg):
    """mmkao.net scraper for the DISI category."""

    def pageName(self):
        return 'mmkao/DISI'

    def getPrefix(self):
        return 'DISI'

    def getMainPageList(self):
        # Page 1 has no numeric suffix; pages 2..4 do.  # 14
        base = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        pages = [base]
        for page_no in range(2, 5):
            pages.append(base + '%d.html' % page_no)
        return pages

class mmkao_nakedari(mmkao_beautyleg):
    """mmkao.net scraper for the NAKED-ART category."""

    def pageName(self):
        return 'mmkao/NAKED-ART'

    def getPrefix(self):
        return 'NAKED-ART'

    def getMainPageList(self):
        # Page 1 has no numeric suffix; pages 2..4 do.  # 14
        base = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        pages = [base]
        for page_no in range(2, 5):
            pages.append(base + '%d.html' % page_no)
        return pages

class mmkao_pans(mmkao_beautyleg):
    """mmkao.net scraper for the PANS category."""

    def pageName(self):
        return 'mmkao/PANS'

    def getPrefix(self):
        return 'PANS'

    def getMainPageList(self):
        # Page 1 has no numeric suffix; pages 2..4 do.  # 14
        base = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        pages = [base]
        for page_no in range(2, 5):
            pages.append(base + '%d.html' % page_no)
        return pages

class mmkao_3agirl(mmkao_beautyleg):
    """mmkao.net scraper for the 3Agirl category."""

    def pageName(self):
        return 'mmkao/3Agirl'

    def getPrefix(self):
        return '3Agirl'

    def getMainPageList(self):
        # Page 1 has no numeric suffix; pages 2..4 do.  # 14
        base = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        pages = [base]
        for page_no in range(2, 5):
            pages.append(base + '%d.html' % page_no)
        return pages

class mmkao_ru1mm(mmkao_beautyleg):
    """mmkao.net scraper for the RU1MM category."""

    def pageName(self):
        return 'mmkao/RU1MM'

    def getPrefix(self):
        return 'RU1MM'

    def getMainPageList(self):
        # Page 1 has no numeric suffix; pages 2..4 do.  # 10
        base = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        pages = [base]
        for page_no in range(2, 5):
            pages.append(base + '%d.html' % page_no)
        return pages

class mmkao_std(mmkao_beautyleg):
    """mmkao.net scraper for the ShowTimeDancer category."""

    def pageName(self):
        return 'mmkao/ShowTimeDancer'

    def getPrefix(self):
        return 'ShowTimeDancer'

    def getMainPageList(self):
        # Page 1 has no numeric suffix; pages 2..4 do.  # 6
        base = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        pages = [base]
        for page_no in range(2, 5):
            pages.append(base + '%d.html' % page_no)
        return pages

class mmkao_mygirl(mmkao_beautyleg):
    """mmkao.net scraper for the MYGIRL category."""

    def pageName(self):
        return 'mmkao/MYGIRL'

    def getPrefix(self):
        return 'MYGIRL'

    def getMainPageList(self):
        # Page 1 has no numeric suffix; pages 2..4 do.  # 5
        base = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        pages = [base]
        for page_no in range(2, 5):
            pages.append(base + '%d.html' % page_no)
        return pages

class mmkao_legku(mmkao_beautyleg):
    """mmkao.net scraper for the Legku category."""

    def pageName(self):
        return 'mmkao/Legku'

    def getPrefix(self):
        return 'Legku'

    def getMainPageList(self):
        # Page 1 has no numeric suffix; pages 2..3 do
        # (this category only lists a few pages).  # 4
        base = 'http://www.mmkao.net/' + self.getPrefix() + '/'
        pages = [base]
        for page_no in range(2, 4):
            pages.append(base + '%d.html' % page_no)
        return pages

# Registry of every scraper class; consumers iterate this list in order.
# Grouped one site per comment block; element order preserved.
CLASS_LIST = [
    # yixiuba
    yixiuba_1, yixiuba_2, yixiuba_3, yixiuba_4,
    # faceks
    faceks,
    # meizitu
    meizitu,
    # nnn2015
    nnn2015_2, nnn2015_3, nnn2015_4, nnn2015_5, nnn2015_7,
    # k8mm
    k8mm_siwa, k8mm_mote, k8mm_mingxing, k8mm_qingchun, k8mm_xinggan,
    # mn22
    mn22_qcmv,
    # luolishidai
    luolishidai_xiezhen, luolishidai_rihanluoli, luolishidai_xingganluoli,
    luolishidai_zhongguoluoli, luolishidai_luoliyujie, luolishidai_oumeiluoli,
    # day9
    day9_meinv, day9_qingchun, day9_jiepai, day9_zhifu, day9_siwa,
    day9_zipai, day9_yuzu, day9_xiangche, day9_mote,
    # yangyan
    yangyan_meinv, yangyan_qingchun, yangyan_xinggan, yangyan_bizhi,
    yangyan_keai, yangyan_zipai, yangyan_siwa, yangyan_qingcheng,
    # feiyang
    feiyang,
    # iab
    iab_manhua, iab_gif, iab_chunjie,
    # mmkao
    mmkao_beautyleg, mmkao_rqstar, mmkao_rosi, mmkao_ligui, mmkao_4kstar,
    mmkao_xiuren, mmkao_disi, mmkao_nakedari, mmkao_pans, mmkao_3agirl,
    mmkao_ru1mm, mmkao_std, mmkao_mygirl, mmkao_legku,
    # cbcb084
    cbcb084_1, cbcb084_2, cbcb084_3, cbcb084_4, cbcb084_5, cbcb084_6,
    cbcb084_7, cbcb084_8,
]