# -*- coding: utf-8 -*-
# @time: 2021/2/1 17:15
# Author: YKL

"""
通过JavaScript逆向获得数据接口爬取页面动态内容

CREATE DATABASE `spider` DEFAULT CHARSET UTF8;
USE `spider`;
-- NOTE(review): get_db_connection() below actually connects to database
-- `python2005`, not `spider` — confirm which schema really holds tb_image.
CREATE TABLE `tb_image` (
  `no` int(11) NOT NULL AUTO_INCREMENT,
  `title` varchar(255) NOT NULL,
  `width` int(11) DEFAULT NULL,
  `height` int(11) DEFAULT NULL,
  `data` longtext,
  `url` varchar(1023) DEFAULT NULL,
  PRIMARY KEY (`no`)
);
"""
import base64
import json
import threading
from concurrent.futures.thread import ThreadPoolExecutor

import MySQLdb
import requests


def get_db_connection():
    """Open a new MySQL connection with autocommit enabled.

    NOTE(review): credentials are hard-coded in source — move them to a
    config file or environment variables before sharing this script.

    :return: a new MySQLdb connection object.
    """
    connection_params = dict(
        host='39.105.56.50',
        port=3306,
        database='python2005',
        user='root',
        password='Yukl.911',
        charset='utf8',
        autocommit=True,
    )
    return MySQLdb.connect(**connection_params)


# Thread-Local storage must be created ONCE at module level: each thread then
# sees its own independent `conn` attribute.  (The original created a fresh
# threading.local() inside the function on every call, so the cached
# connection was never found and a new DB connection was opened per call.)
_thread_local = threading.local()


def ensure_connection():
    """Return the database connection bound to the current thread.

    Opens a connection on first use in a thread and caches it on the
    module-level thread-local object, so each thread reuses exactly one
    connection across calls.

    :return: the calling thread's MySQLdb connection object.
    """
    if getattr(_thread_local, 'conn', None) is None:
        _thread_local.conn = get_db_connection()
    return _thread_local.conn


def crawl_images_info(page):
    """Fetch one page of image metadata from the 360 image-search JSON API.

    :param page: 1-based page number; each page holds 30 items
        (the API's ``sn`` offset parameter is ``(page - 1) * 30``).
    :return: list of ``(title, width, height, url)`` tuples, empty if the
        response carries no ``list`` payload.
    :raises requests.RequestException: on network failure or timeout.
    """
    resp = requests.get(
        f'http://image.so.com/zjl?ch=beauty&sn={(page - 1) * 30}&listtype=new&temp=1',
        timeout=10  # never hang the crawl on a stalled connection
    )
    images_info = []
    # resp.json() replaces json.loads(resp.text); `or []` guards against a
    # response where 'list' is missing or null.
    for beauty in resp.json().get('list') or []:
        images_info.append((beauty['title'], beauty['width'],
                            beauty['height'], beauty['qhimg_url']))
    return images_info


def save_to_db(pool, images_info):
    """Insert crawled image metadata and schedule each image for download.

    :param pool: ThreadPoolExecutor that runs the download tasks.
    :param images_info: iterable of (title, width, height, url) tuples.
    """
    connection = ensure_connection()
    insert_sql = (
        'insert into tb_image (title, width, height, url) '
        'values (%s, %s, %s, %s)'
    )
    with connection.cursor() as cursor:
        for record in images_info:
            cursor.execute(insert_sql, record)
            # Last element of the tuple is the image URL; lastrowid is read
            # immediately after each insert so it matches this row.
            image_url = record[-1]
            pool.submit(download_image, cursor.lastrowid, image_url)


def download_image(no, image_url):
    """Download an image, base64-encode it, and store it in its DB row.

    :param no: primary key of the tb_image row to update.
    :param image_url: URL of the image to fetch.
    :raises requests.RequestException: on network failure, timeout, or an
        HTTP error status (so an error page is never stored as image data).
    """
    resp = requests.get(image_url, timeout=10)  # avoid hanging a pool thread
    # Without this check the original would happily base64-encode a 404/5xx
    # error page and persist it as if it were the image.
    resp.raise_for_status()
    data = base64.b64encode(resp.content).decode()
    with ensure_connection().cursor() as cursor:
        cursor.execute(
            'update tb_image set data=%s where no=%s',
            (data, no)
        )


def main(page_count=10, max_workers=16):
    """Crawl image metadata page by page, downloading images concurrently.

    Generalized from hard-coded constants; calling ``main()`` with no
    arguments behaves exactly as before.

    :param page_count: number of result pages to crawl (30 images each).
    :param max_workers: size of the download thread pool.
    """
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        for page in range(1, page_count + 1):
            save_to_db(pool, crawl_images_info(page))


if __name__ == '__main__':
    main()
