# -*- coding: utf-8 -*-
import os
import time
from urllib import parse
import requests
from fake_useragent import UserAgent
import pymysql
from lxml import etree
import threading
from concurrent.futures import ThreadPoolExecutor

class soogifSpider(object):
    """Spider for soogif.com: scrapes GIF metadata into MySQL, then downloads the files.

    Workflow:
      1. ``run`` crawls category listing pages and inserts matching records
         into the ``gif`` table with a sentinel ``created_date``.
      2. ``get_gif`` pulls not-yet-downloaded rows, fetches each image via a
         small thread pool, and stamps the rows as done.
    """

    def __init__(self):
        # Per-request retry counter used by get_html (reset for every URL).
        self.blog = 1
        self.db = pymysql.connect(host='192.168.1.250', user='root', password='Admin250#', database='wallpaper', charset='utf8mb4')
        self.cursor = self.db.cursor()

    def run(self):
        """Crawl every configured category and store keyword-matching GIF records.

        A category is abandoned early after 300 consecutive titles that do not
        contain its keyword; the per-category ``offset`` allows resuming a
        partially finished crawl.
        """
        # Spider primer: http://c.biancheng.net/python_spider/what-is-spider.html
        try:
            dict_category = {
                # "Beauty": {"url": "https://www.soogif.com/gif/505893-{}-0-0.html", "offset": 1, "keyword": "比基尼"},
                # "Cute Pet": {"url": "https://www.soogif.com/gif/505405-{}-0-0.html", "offset": 1, "keyword": "萌宠"},
                # "Funny": {"url": "https://www.soogif.com/gif/334527-{}-0-0.html", "offset": 1, "keyword": "搞笑"},
                # "Delicacy": {"url": "https://www.soogif.com/gif/505369-{}-0-0.html", "offset": 1, "keyword": "美食"},
                "Scenery": {"url": "https://www.soogif.com/gif/582509-{}-0-0.html", "offset": 56, "keyword": "风景"}
            }
            for category in dict_category:
                # The first listing page carries the pagination; the second-to-last
                # anchor text is the total page count.
                offset_html = self.get_html(dict_category[category]['url'].format(1))
                offset_html = etree.HTML(offset_html)
                page = int(offset_html.xpath('//div[@id="search-pagination"]/a/text()')[-2])
                print('category [', category, '] total page:', page)
                category_keyword_count = 0  # consecutive titles without the keyword
                for offset in range(1, page + 1):
                    if category_keyword_count >= 300:
                        print('category [', category, '] end of keyword no match count:', category_keyword_count)
                        break
                    # Skip pages below the configured starting offset (resume support).
                    if dict_category[category]['offset'] > offset:
                        continue
                    parse_html = self.get_html(dict_category[category]['url'].format(offset))
                    parse_html = etree.HTML(parse_html)
                    dlist = parse_html.xpath('//div[@id="search-container"]/a[@class="image-item"]/img')
                    data = []
                    for dl in dlist:
                        title = dl.xpath('./@alt')[0]
                        link = dl.xpath('./@src')[0]
                        # Store the URL path as the "original" link (no host/query).
                        original_link = parse.urlparse(link).path
                        if dict_category[category]['keyword'] in title:
                            data.append((title, category, link, original_link, offset))
                            category_keyword_count = 0  # a match resets the streak
                        else:
                            category_keyword_count += 1
                    if data:
                        sql = 'insert into gif (title, category, link, original_link, description) ' \
                              'values (%s,%s,%s,%s,%s)'
                        self.cursor.executemany(sql, data)
                        self.db.commit()
                    print('category [', category, '] spider over page:', offset, ' data:', len(data))
                    time.sleep(0.5)  # throttle: be polite to the server
        except Exception as e:
            print('Run Error:', e)

    def get_header(self):
        """Return request headers with a randomly chosen User-Agent string."""
        ua = UserAgent()
        return {'User-Agent': ua.random}

    def get_html(self, url):
        """Fetch *url* and return its body text, retrying on failure.

        Up to 5 attempts are made per URL (the counter is reset for each call,
        so one flaky URL no longer exhausts the budget for the whole crawl).
        Returns None when every attempt fails.

        Fixes vs. previous version: the retry result is now actually returned
        (the old recursive call discarded it), and the counter is per-URL
        instead of global.
        """
        self.blog = 1
        while self.blog <= 5:
            try:
                res = requests.get(url=url, headers=self.get_header(), timeout=20)
                return res.text
            except Exception as e:
                print('get_html [', url, '] Error: ', e)
                self.blog += 1
        return None

    def get_gif(self, directory):
        """Download every GIF whose row still carries the sentinel created_date.

        Rows are pulled in batches of ``third_count``; each image is fetched by
        a 2-worker thread pool, then the batch's rows are stamped with NOW() so
        they are not selected again.

        :param directory: destination directory (created if missing); must end
                          with a path separator because file names are appended
                          by string concatenation.
        """
        # Create the destination directory for the images.
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Batch size per DB query. while_count controls how many batches are
        # processed in this invocation (the count-based sizing is currently
        # disabled and a single batch is taken).
        # sql = 'select count(*) from gif where created_date = "1000-01-01 00:00:00"'
        # self.cursor.execute(sql)
        # data = self.cursor.fetchone()
        # count = data[0]
        third_count = 50
        # if int(count % third_count) > 0:
        #     while_count = int(count / third_count) + 1
        # else:
        #     while_count = int(count / third_count)
        while_count = 1

        # Thread pool with 2 worker threads for concurrent downloads.
        pool = ThreadPoolExecutor(max_workers=2)

        while while_count > 0:
            # Sentinel created_date marks rows whose image is not yet on disk.
            sql = 'select id,category,link from gif where created_date="1000-01-01 00:00:00" limit ' + str(third_count)
            self.cursor.execute(sql)
            data = self.cursor.fetchall()
            while_count -= 1

            update_ids = []
            futures = []
            for dt in data:
                # dt = (id, category, link) — the file is named after the link's
                # basename and fetched from the link (dt[2]); the old code used
                # dt[1], the category, for both.
                file_name = directory + os.path.basename(dt[2])
                # submit() takes the callable plus its arguments separately;
                # passing a single tuple would crash the worker.
                futures.append(pool.submit(self.save_image, dt[2], file_name))
                update_ids.append(dt[0])

            # Wait for this batch to finish (also surfaces worker exceptions).
            for future in futures:
                future.result()

            # Stamp the downloaded rows so they are not selected again.
            # (The old code re-ran the 5-column INSERT with 3-column rows,
            # which could never succeed.)
            if update_ids:
                placeholders = ','.join(['%s'] * len(update_ids))
                sql = 'update gif set created_date=now() where id in (' + placeholders + ')'
                self.cursor.execute(sql, update_ids)
                self.db.commit()

        pool.shutdown(wait=True)

    def save_image(self, img_link, filename):
        """Download a single image from *img_link* and write it to *filename*."""
        content = requests.get(url=img_link, headers=self.get_header()).content
        with open(filename, 'wb') as f:
            f.write(content)
        print(filename, '下载成功')

    def __del__(self):
        # Close the cursor and the database connection on garbage collection.
        self.cursor.close()
        self.db.close()


if __name__ == '__main__':
    # Download pending GIFs into an images/ directory next to this script.
    image_dir = os.path.abspath(os.path.dirname(__file__)) + '/images/'
    soogifSpider().get_gif(image_dir)
