import ast
import os
import threading
from time import sleep

import redis
import requests
from lxml import etree


class MaoMi:
    '''多线程添加代理爬取妹子图 (multi-threaded image scraper with proxy support).

    Workflow:
      spider()   -- crawl category listing pages and push album metadata
                    (name + image urls) into a redis list.
      download() -- pop album metadata from that redis list and download
                    the images into ``Pictures/<album name>/``.
    '''

    def __init__(self):
        # Site root; all relative hrefs scraped from pages are joined onto it.
        self.domain = 'http://www.271uu.com'
        # 分类 -- category listing pages (URL-encoded Chinese category names).
        self.index_urls = ['/tupian/list-%E4%BA%9A%E6%B4%B2%E8%89%B2%E5%9B%BE.html',
                           '/tupian/list-%E6%AC%A7%E7%BE%8E%E8%89%B2%E5%9B%BE.html']
        # Local redis instance used as the album work queue.
        self.redisHelper = redis.Redis(
            connection_pool=redis.ConnectionPool(host='127.0.0.1', password=''))
        # Redis list key holding one repr()'d album record per element.
        self.redisKey = 'maomiimgs'

    def get_proxy(self):
        '''获取代理 -- fetch one proxy from the local proxy pool.

        Returns a ``proxies`` dict usable by requests, or None when the
        pool is unreachable or returns a non-200 status.
        '''
        # 在本地获取代理
        PROXY_POOL_URL = 'http://localhost:5555/random'
        try:
            response = requests.get(PROXY_POOL_URL, timeout=5)
            if response.status_code == 200:
                return {"http": "http://{}".format(response.text)}
            return None
        # BUG FIX: requests raises requests.exceptions.ConnectionError, which
        # is NOT a subclass of the builtin ConnectionError the old code caught,
        # so a dead proxy pool crashed the caller.  Catch the requests
        # exception hierarchy instead.
        except requests.exceptions.RequestException:
            return None

    def create_dictionary(self, name):
        '''创建文件夹 -- create the directory, silently ok if it exists.'''
        try:
            # exist_ok avoids the noisy FileExistsError of the old os.mkdir
            # while still surfacing real failures (permissions, bad path).
            os.makedirs(name, exist_ok=True)
        except OSError as e:
            print(e)

    def judge_file(self, filename):
        '''判断文件是否存在 -- True if *filename* is an existing file.'''
        # os.path.isfile replaces the old open()/bare-except probe: no file
        # handle is opened and unrelated errors are no longer swallowed.
        return os.path.isfile(filename)

    def get_response(self, url, headers=None, proxy=None, timeout=10):
        '''获取response响应 -- GET *url* and force utf-8 decoding.

        ``timeout`` is new but defaulted, so existing callers are unaffected;
        without it a hung connection blocked a worker thread forever.
        '''
        response = requests.get(url, headers=headers, proxies=proxy,
                                timeout=timeout)
        response.encoding = 'utf-8'
        return response

    def parse_html1(self, response):
        '''解析response内容获取套图名称及url地址.

        Spawns one thread per album to push its image urls into redis.
        Returns the list of ``{'name': ..., 'url': ...}`` records found
        on the page.
        '''
        html = etree.HTML(response.text)
        # 用于保存该页面套图名称及url地址
        page_img_urls = []
        # 套图名称及url地址保存在 class="box movie_list" 的div标签下的 ul/li 内
        for img_content in html.xpath('//div[@class="box movie_list"]//ul//li'):
            img_info = {
                # 套图名称在li标签下第一个a标签的title属性
                'name': img_content.xpath('./a[1]/@title')[0],
                # 套图url在li标签下第一个a标签的href属性（相对路径，补上域名）
                'url': self.domain + img_content.xpath('./a[1]/@href')[0],
            }
            # BUG FIX: the parsed records were never appended before, so the
            # method always returned an empty list despite its docstring.
            page_img_urls.append(img_info)
            threading.Thread(target=self.get_picture_url,
                             args=(img_info['name'], img_info['url'])).start()
            # 设置延时，防止请求过于频繁
            sleep(1)
        return page_img_urls

    def get_next_page_url(self, response):
        '''获取下一页url地址 -- relative href of "下一页", or None at the end.'''
        html = etree.HTML(response.text)
        next_page_url = html.xpath('//a[text()="下一页"]/@href')
        # 判断下一页是否存在
        return next_page_url[0] if next_page_url else None

    def get_picture_url(self, name, url):
        '''获取图片 -- collect every image url of one album and queue it.'''
        proxy = self.get_proxy()
        response = self.get_response(url, proxy=proxy)
        html = etree.HTML(response.text)
        # 获取图片url  url在class="content"的div标签下的img的data-original属性
        img_urls = html.xpath('//div[@class="content"]//img/@data-original')
        # BUG FIX: redis-py refuses to serialise a dict (DataError); push the
        # repr() string explicitly.  download() parses it back with
        # ast.literal_eval.
        self.redisHelper.rpush(self.redisKey,
                               repr({'name': name, 'urls': [str(u) for u in img_urls]}))
        print('保存套图{}到redis成功'.format(name))

    def download_picture(self, name, url):
        '''下载图片 -- download one image into Pictures/<name>/.'''
        # 设置headers请求头，获得下载图片权限 (site checks the Referer).
        headers = {
            'Referer': self.domain,
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
        }
        # 获取代理
        proxy = self.get_proxy()
        # 文件名取套图名 + url最后6个字符（形如 "xx.jpg"）
        filename = 'Pictures/' + name + '/' + name + url[-6:]
        if self.judge_file(filename):
            print('文件{}已存在'.format(filename))
            return
        try:
            # BUG FIX: the GET used to sit outside the try, so any network
            # error killed the worker thread uncaught.
            img_content = self.get_response(url, headers=headers,
                                            proxy=proxy).content
            print(filename)
            with open(filename, 'wb') as f:
                f.write(img_content)
            # 设置延时
            sleep(1)
        except Exception as e:
            print('图片{}下载失败'.format(filename))

    def spider(self):
        '''爬虫爬取妹子图图片（多线程，设置代理下载）
        1.获取首页response响应内容
        2.解析内容获取套图名称及url地址并下载（下载时多线程）
        3.获取下一页url地址
        4.循环获取下一页response响应内容
          解析内容获取套图名称及url地址并下载（下载时多线程）
        '''
        # SECURITY FIX: int() instead of eval() on raw user input.
        type_choice = int(input('爬取猫咪图片url并保存到redis：\n1.亚洲\t2.欧美\n请选择：'))
        # 1.获取首页response响应内容
        url = self.domain + self.index_urls[type_choice - 1]
        response = self.get_response(url)
        # 2.解析内容获取套图名称及url地址
        self.parse_html1(response)
        # 3.获取下一页url地址
        next_page_url = self.get_next_page_url(response)
        # 4.循环获取下一页response响应内容并解析
        while next_page_url:
            # 设置延时 2秒 防止请求过于频繁
            sleep(2)
            response = self.get_response(self.domain + next_page_url)
            self.parse_html1(response)
            next_page_url = self.get_next_page_url(response)
        print('该类型保存完成')

    def download(self):
        '''
        下载redis中存储的套图 -- pop album records pushed by spider()
        until the queue is empty, one download thread per image.
        '''
        self.create_dictionary('Pictures')
        count = self.redisHelper.llen(self.redisKey)
        print('共有{}个套图可供下载'.format(count))
        # BUG FIX: lpop returns None on an empty queue; the old code fed that
        # None straight into eval() and crashed with a TypeError.  Also use
        # ast.literal_eval -- the queue content is data, not trusted code.
        raw = self.redisHelper.lpop(self.redisKey)
        while raw is not None:
            img = ast.literal_eval(
                raw.decode('utf-8') if isinstance(raw, bytes) else raw)
            self.create_dictionary(name='Pictures/' + img['name'])
            try:
                # 循环获取图片
                for url in img['urls']:
                    threading.Thread(target=self.download_picture,
                                     args=(img['name'], url)).start()
            except Exception as e:
                print(e, '该套图保存失败')
            raw = self.redisHelper.lpop(self.redisKey)


if __name__ == '__main__':
    maomi = MaoMi()
    # SECURITY FIX: int() instead of eval() -- eval() on raw stdin allows
    # arbitrary code execution; the menu only needs an integer.
    choice = int(input('选择需要进行的操作：\n1.爬取\t2.下载\n请选择：'))
    if choice == 1:
        maomi.spider()
    else:
        maomi.download()
