# -*- coding: utf-8 -*-
from scrapy_redis.spiders import RedisSpider
import json
from scrapy.http import Request
from bs4 import BeautifulSoup
from ..items import *
import os
import pymysql


class SaveimageSpider(RedisSpider):
    """Spider that downloads the large-size scene images of every movie
    recorded in the database.

    Usage notes:
        1. In settings, enable the downloader middleware by uncommenting
           # 'cut_movie.middlewares.RandomProxy': 100
        2. Uncomment the ``RandomProxy`` class in middlewares and configure
           your own IP proxy pool inside ``get_random_ip`` so requests rotate
           IPs and avoid Douban's anti-scraping measures.
    """
    name = 'saveImage'
    allowed_domains = ['www.douban.com']

    # start_urls = ['http://www.douban.com/']

    def __init__(self, *args, **kwargs):
        # Forward any Scrapy-supplied arguments (e.g. crawler kwargs) to the
        # base spider; the original hard-coded call dropped them.
        super().__init__(*args, **kwargs)
        self.conn = pymysql.connect(host='127.0.0.1', user='root', password='123456', db='test',
                                    charset='utf8')
        self.cursor = self.conn.cursor()
        self.base_path = r'E:\project\large_images'  # image storage location

    def closed(self, reason):
        """Scrapy shutdown hook: release the MySQL cursor and connection."""
        try:
            self.cursor.close()
        finally:
            self.conn.close()

    # Read all picture rows and schedule a download for each large image.
    def parse(self, response):
        """Query every (movie_id, url) pair from ``t_picture`` and yield a
        download request for the large-size variant of each image.

        NOTE(review): the enumeration index is global across all rows, so
        file names are unique overall but not sequential per movie directory
        — confirm this matches how the images are consumed downstream.
        """
        sql = "select movie_id, url from t_picture"
        self.cursor.execute(sql)

        for i, (movie_id, url) in enumerate(self.cursor.fetchall()):
            # Rewrite the size segment of the Douban image URL (third path
            # component from the end) to 'l' to request the large version.
            parts = url.split('/')
            parts[-3] = 'l'
            large_url = '/'.join(parts)

            # Hand the download request to the scheduler; dont_filter skips
            # the dupefilter so repeated hosts/URLs are all fetched.
            yield Request(
                url=large_url,
                callback=self.download_image,
                meta={'class': str(movie_id), 'order': str(i)},
                dont_filter=True,
            )

    # Persist a downloaded image to the local disk.
    def download_image(self, response):
        """Save the response body as ``<base_path>/<movie_id>/<order>.jpg``."""
        image_path = os.path.join(self.base_path, response.meta['class'])
        # Bug fix: create the per-movie directory if it does not exist yet;
        # otherwise open() raises FileNotFoundError on the first image.
        os.makedirs(image_path, exist_ok=True)
        image_name = os.path.join(image_path, response.meta['order'] + '.jpg')
        with open(image_name, 'wb') as f:
            f.write(response.body)
