# -*- coding: utf-8 -*-
"""
@Time   : 2019/11/24 21:19
@Author : LeeCQ

目标从妹子图网站（http://www.mzitu.com/）上爬取所有图片。
    1. 得到所有图集的url；
    2. 得到所有图片的url；
    3. 得到每一张图片。
"""
import requests
import time
import os
import re
import json
from bs4 import BeautifulSoup
# local
from HTTP.Download_Mzitu.Mzitu_db import MzituDB, MySQLInfo


class MzituDownload:
    """Scrape every album and every image from www.mzitu.com.

    Workflow (see ``run``):
        1. collect the URL of every album from the /all/ index page;
        2. for each album, collect the URL of every image page/image;
        3. download each image to disk and mirror it into MySQL
           via the project-local ``MzituDB`` helper.
    """

    SAVE_PATH = './'                              # root directory for all output
    ALBUM_PATH = SAVE_PATH + 'album/'             # one sub-directory per album
    SUP_DIR = SAVE_PATH + '_sup/'                 # bookkeeping files
    f_all_album_url = SUP_DIR + '_all_album.txt'  # text file listing every album URL
    f_every_album_url = './sup/{album_name}.txt'  # per-album URL list file
    f_encoding = 'utf-8'                          # encoding for bookkeeping files

    URL = 'https://www.mzitu.com/all/'  # index page that links every album
    HEADERS = {
        'Referer': 'https://www.mzitu.com/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-Hans-CN, zh-Hans; q=0.5',
        'Cache-Control': 'max-age=0',
        'Accept': 'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362'
    }

    def __init__(self):
        # makedirs(exist_ok=True): idempotent and tolerates a missing parent,
        # unlike the plain os.mkdir the original used.
        os.makedirs(self.SUP_DIR, exist_ok=True)
        os.makedirs(self.ALBUM_PATH, exist_ok=True)
        self._db = MzituDB()

    def set_path(self, path):
        """Change the root save path.

        Also recomputes the derived paths; previously only SAVE_PATH was
        updated, so ALBUM_PATH/SUP_DIR kept pointing at the old root.
        """
        self.SAVE_PATH = path
        self.ALBUM_PATH = path + 'album/'
        self.SUP_DIR = path + '_sup/'
        self.f_all_album_url = self.SUP_DIR + '_all_album.txt'

    @staticmethod
    def open_url(url, headers):
        """GET *url* and return the page parsed with BeautifulSoup/lxml.

        The header dict is copied before the Host header is injected so
        that shared dicts (e.g. the class-level HEADERS) are never
        mutated as a side effect of a request.
        """
        _start_open_url = time.time()
        headers = dict(headers)  # defensive copy -- do not mutate the caller's dict
        headers['Host'] = 'www.mzitu.com'
        _response = requests.get(url, headers=headers, timeout=3)
        return BeautifulSoup(_response.text, 'lxml')

    def sql_all_album_append(self, href, text):
        """Insert one album row (id, title, url) into the all-album sheet."""
        print(f'* 正在更新数据库{self._db.sheet_all_album}... ', end='  ')
        idd = href.split('/')[-1]  # numeric album id is the last URL segment
        if self._db.all_album_append(idd, text, href):
            # Non-falsy return from the DB helper signals a duplicate key.
            print(f'\r* {idd}已存在...', end='', flush=True)
        else:
            print(f'\r已写入{idd}', end='', flush=True)

    def all_albums(self, url, headers):
        """Parse the /all/ index page.

        Writes every (url, title) pair to ``f_all_album_url``, records
        each album in the DB, and returns the album URLs oldest-first.
        """
        print("正在解析全部的图集...")
        soup = self.open_url(url, headers)
        albums = []
        with open(self.f_all_album_url, 'w', encoding=self.f_encoding) as _afa:
            _sql_start = time.time()
            # [::-1]: the index lists newest first; iterate oldest-first.
            for href, text in [(a.get('href'), a.text) for a in soup.select('p.url > a')][::-1]:
                _afa.write(href + '\t' + text + '\n')
                albums.append(href)
                self.sql_all_album_append(href, text)
            print('更新数据库用时: ', time.time() - _sql_start)
        print('')
        return albums

    def one_album(self, album_url):
        """Resolve every image URL of one album plus its metadata.

        :param album_url: URL of the album's first page.
        :return: ``(1, album_url)`` if the DB already marks the album as
                 downloaded, otherwise a dict::

                     {'idd': ..., 'title': ..., 'times': ..., 'tags': ...,
                      'pagePhoto': [page urls], 'images': [image urls]}
        """
        # Skip albums the DB already flags as fully downloaded.
        if self._db.view_column('mzitu', 'DOWNLOAD', WHERE=f'ALBUM_URL="{album_url}"')[0][0] == 'Y':
            return 1, album_url
        soup = self.open_url(album_url, self.HEADERS)
        print(self.one_album.__name__, f'正在构建相册{album_url}中的URL...', end='\t')
        _start_one = time.time()
        idd = re.search(r'\d+', album_url).group()
        title = soup.select('h2.main-title')[0].text  # album title
        tags = soup.select('div.main-meta > span')[0].text.replace('分类：', '').strip()  # category
        times = soup.select('div.main-meta > span')[1].text.replace('发布于', '').strip()  # publish date
        pagenums = soup.select('div.pagenavi > a')[-2].text  # last page number
        # One page per image; fetch each page to extract the real image URL.
        pic_url = [album_url + '/' + str(num) for num in range(1, int(pagenums) + 1)]
        images = [self.open_url(u, self.HEADERS).select('div.main-image > p > a > img')[0].get('src')
                  for u in pic_url]
        print(self.one_album.__name__, "建构时间：", time.time() - _start_one)
        data = {
            'idd': idd,
            'title': re.sub('[:*?"<> |]', '-', title),  # strip filesystem-hostile chars
            'times': times,
            'tags': tags,
            'pagePhoto': pic_url,
            'images': images,
        }
        print(f"正在更新数据库{self._db.sheet_all_album}的{idd}项...", end='\t')
        if not self._db.all_album_update(idd, 'TIME', times, TAGS=tags, DATA=json.dumps(data)):
            print("更新成功")
        else:
            print('更新失败')  # fixed typo: was 更行失败
        return data

    def save_photo(self, data):
        """Download every image of one album and record each in the DB.

        :param data: the dict produced by ``one_album``, or the tuple
                     ``(1, album_url)`` for an already-downloaded album.
        """
        # Explicit sentinel check replaces the original bare except/pass.
        if isinstance(data, tuple) and data and data[0] == 1:
            print(data[1], '已经下载')
            return

        header = dict(self.HEADERS)  # copy: Host/Referer below must not leak into HEADERS
        header['Host'] = 'i5.meizitu.net'

        album_data = data
        album_path = self.ALBUM_PATH + album_data['title'] + '/'
        os.makedirs(album_path, exist_ok=True)
        for url, img in zip(album_data['pagePhoto'], album_data['images']):
            photo_name = img.split('/')[-1]
            photo_path = album_path + photo_name

            print('Download:', img)
            if not os.path.exists(photo_path):
                header['Referer'] = url  # site rejects requests without a per-page Referer
                photo_data = requests.get(img, headers=header).content
                # Length guard before opening the file: a <=10-byte body is an
                # anti-hotlink placeholder; the original created an empty file.
                if len(photo_data) > 10:
                    with open(photo_path, 'wb') as fp:
                        fp.write(photo_data)
            else:
                with open(photo_path, 'rb') as fp:  # already on disk: re-read for the DB
                    photo_data = fp.read()
            if len(photo_data) > 10:
                if self._db.one_photo_append(idd=data['idd'],
                                             photo_name=photo_name,
                                             photo_uri=url,
                                             photo_url=img,
                                             local_path=os.path.realpath(photo_path).replace('\\', r'/'),
                                             photo_data=photo_data
                                             ):
                    print(f"Duplicate entry '{photo_name}' for key 'PRIMARY'")
        # Mark the whole album as downloaded.
        self._db.all_album_update(data['idd'], 'DOWNLOAD', 'Y')

    def run(self):
        """Crawl every album found on the index page, politely throttled."""
        print("爬取开始...")
        for album in self.all_albums(self.URL, self.HEADERS):
            print('-' * 50)
            self.save_photo(self.one_album(album))
            time.sleep(0.5)  # be gentle with the server


if __name__ == '__main__':
    # The local URL/HEADERS constants previously defined here were dead
    # code: MzituDownload reads its own class-level URL and HEADERS.
    MzituDownload().run()
