import re

from lxml.html import soupparser

from spiders.BaseSpider import BaseSpider

__author__ = 'tzq139'


class Tu11Spider(BaseSpider):
    """Spider for www.tu11.com.

    Walks the configured category list pages, collects every gallery's
    name/URL, then follows each gallery's pagination and saves all of
    its images under /Users/mac/beautiful/<gallery name>/.
    """

    def __init__(self):
        # Category slug -> paginated list-page URL template ('%s' is the page number).
        self.siteURL = {'meituisiwatupian': 'http://www.tu11.com/meituisiwatupian/list_2_%s.html',
                        'xingganmeinvxiezhen': 'http://www.tu11.com/xingganmeinvxiezhen/list_2_%s.html',
                        'BEAUTYLEGtuimo': 'http://www.tu11.com/BEAUTYLEGtuimo/list_2_%s.html',
                        'shenghuomeinvzipai': 'http://www.tu11.com/shenghuomeinvzipai/list_2_%s.html'}
        # Browser-like headers; the site appears to reject bare clients.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Cookie': 'Hm_lvt_a521ae282c3c2742707c26ac9d3a8c59=1572082762,1573313321,1573964727; Hm_lpvt_a521ae282c3c2742707c26ac9d3a8c59=1573967994',
            'Host': 'www.tu11.com',
            'If-Modified-Since': 'Fri, 15 Nov 2019 11:09:46 GMT',
            'If-None-Match': '0b95330a59bd51:484',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36'
        }
        self.hosts = 'http://www.tu11.com'
        BaseSpider.__init__(self)

    def getContents(self, pageindex):
        """Collect gallery entries from page `pageindex` of every category.

        Returns a list of dicts: {'Name': gallery title, 'Url': absolute
        gallery URL}. Categories whose page fails to download are skipped.
        """
        contents = []
        for category, template in self.siteURL.items():
            # Page 1 lives at the category root instead of list_2_1.html.
            if pageindex == 1:
                url = 'http://www.tu11.com/%s/' % str(category)
            else:
                url = template % str(pageindex)
            contenthtml = self.HttpHelper.getHtml(url, self.headers, 'gb2312', False)
            if contenthtml is None:
                continue
            dom = soupparser.fromstring(contenthtml)
            for node in dom.xpath("//div[@class='row']/ul/li/div/p[@class='textbox2']/a[1]"):
                # avoid shadowing the `dict` builtin (original used `dict` as a name)
                contents.append({'Name': node.xpath("@title")[0],
                                 'Url': self.hosts + node.xpath("@href")[0]})
        return contents

    # Parse the gallery list and loop over each gallery's image pages.
    def getPageImages(self, index):
        """Download every gallery listed on list page `index`.

        BUG FIX: the original broke out of the pagination loop *before*
        the download step, so the images on each gallery's last page
        (and on single-page galleries) were never saved. Images are now
        downloaded first, then the next-page link is followed.
        Also fixes the Referer header, which previously pointed at the
        *next* page because `url` was advanced before the header was built.
        """
        # Fetch the index page's gallery addresses.
        print(u"正在收集第", index, u"页的MM信息")
        contents = self.getContents(index)
        if contents is not None:
            # Iterate over every collected gallery.
            print(u"开始循环下载", index, u"页的MM信息")
            for item in contents:
                name = item['Name']
                url = item['Url']
                if "http" not in url:
                    return
                name = '/Users/mac/beautiful/' + name
                self.FileHelper.mkdir(name)
                # Per-gallery page counter; renamed from `index` so it no
                # longer clobbers the method argument.
                page_no = 0
                while True:
                    print('开始下载' + url)
                    detailhtml = self.HttpHelper.getHtml(url, self.headers, 'gb2312', False)
                    if detailhtml is None:
                        # Fetch failed; give up on this gallery rather than crash.
                        break
                    parts = url.split('/')
                    base_host = url.replace(parts[-1], "")
                    # Download this page's images BEFORE looking for a next page.
                    imagesurls = self.getImage(detailhtml)
                    download_headers = [
                        ('Accept', 'image/webp,image/apng,image/*,*/*;q=0.8'),
                        ('Accept-Encoding', 'gzip, deflate'),
                        ('Accept-Language', 'zh-CN,zh;q=0.9,en;q=0.8'),
                        ('Connection', 'keep-alive'),
                        ('Host', 'img15.haotuwu.com:8080'),
                        ('Referer', url),
                        ('User-Agent',
                         'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')
                    ]
                    if imagesurls:
                        for i, img_url in enumerate(imagesurls):
                            self.FileHelper.saveImg(img_url, name + '/' + str(page_no) + str(i) + '.jpg',
                                                    download_headers)
                    # Look for the "next page" link, e.g.
                    # http://www.tu11.com/meituisiwatupian/2019/15431_2.html
                    dom = soupparser.fromstring(detailhtml)
                    next_nodes = dom.xpath(
                        "//div[@class='row dede_pages']/ul[@class='list-inline text-center nryfy']/li/a")
                    next_href = None
                    for link in next_nodes:  # `link`, not `next`: don't shadow the builtin
                        if link.text == '下一页':
                            next_href = link.xpath("@href")[0]
                            break
                    if next_href is None:
                        break
                    url = base_host + next_href
                    page_no += 1

    # Count galleries on a list page (returns the matched nodes).
    def getAllnum(self, pagehtml):
        """Return the gallery <li> nodes found on a list page.

        BUG FIX: the original XPath ended with a trailing '/', which is
        invalid XPath syntax and made lxml raise XPathEvalError.
        """
        dom = soupparser.fromstring(pagehtml)
        return dom.xpath("//div[@class='row']/ul/li")

    # Extract image URLs from a gallery detail page.
    def getImage(self, pagehtml):
        """Return the list of img15.haotuwu.com image URLs in `pagehtml`,
        or None when the page contains none (callers test for None)."""
        reg = r'<img src="(http:\/\/img15\.haotuwu\.com:8080\/picture.*?\.jpg)'
        images = re.findall(re.compile(reg, re.S), pagehtml)
        if len(images) == 0:
            return None
        return images

    # TODO: push collected work onto the producer queue.
