# -*- coding: utf-8 -*-
import scrapy
from scrapy_redis.spiders import RedisSpider
import json
from scrapy.http import Request
from bs4 import BeautifulSoup
from ..items import *
import os
import pymysql


class ImagesSpider(RedisSpider):
    """
    Read every movie stored in the database and crawl its images, quotes
    (lines) and tags from the corresponding websites.

    Usage notes:
        1. Enable the download middleware in settings: uncomment
           # 'cut_movie.middlewares.RandomProxy': 100
        2. Uncomment the RandomProxy class in middlewares and configure your
           own IP proxy pool in get_random_ip, so the spider rotates IPs and
           avoids Douban's anti-crawling measures.
    """
    name = 'images'
    allowed_domains = ['www.douban.com']

    def __init__(self, *args, **kwargs):
        # Forward crawler arguments to the base spider instead of a bare
        # RedisSpider.__init__(self) call, so instantiation through the
        # Scrapy crawler process keeps working. No-arg construction is
        # still supported (backward compatible).
        super().__init__(*args, **kwargs)
        self.conn = pymysql.connect(host='127.0.0.1', user='root', password='123456', db='test',
                                    charset='utf8')
        self.cursor = self.conn.cursor()

    def closed(self, reason):
        # Called by Scrapy when the spider finishes; previously the MySQL
        # connection and cursor were never released (resource leak).
        self.cursor.close()
        self.conn.close()

    # Schedule requests for every movie found in the database.
    def parse(self, response):
        """For each movie row (id, mid, movie_name) yield requests for its
        images, its tags and its quotes."""
        sql = "select id, mid, movie_name from t_movie"
        self.cursor.execute(sql)
        for each in self.cursor.fetchall():

            # Image listing pages: three pages of 30 photos each.
            for i in range(3):
                yield Request(
                    url='https://movie.douban.com/subject/' + each[1] + '/photos?type=S&start=' + str(i * 30),
                    callback=self.get_image,
                    meta={'mid': each[0]},
                    dont_filter=True,
                )

            # Tag (genre/region) JSON endpoint.
            yield Request(
                url='https://movie.douban.com/j/subject_abstract?subject_id=' + each[1],
                callback=self.get_target,
                meta={'mid': each[0]},
                dont_filter=True,
            )

            # Movie quote search page.
            yield Request(
                url='https://www.juzikong.com/s?q=' + each[2] + '&type=posts',
                callback=self.get_lines,
                meta={'mid': each[0]},
                dont_filter=True,
            )

    # Feed movie images into the item pipeline.
    def get_image(self, response):
        """Yield an ImageItem for every photo on an image listing page."""
        # Explicit parser: BeautifulSoup(content) without one emits a
        # warning and picks whichever parser is installed, which can vary
        # between environments.
        soup = BeautifulSoup(response.text, 'html.parser')

        mid = response.meta['mid']
        for item in soup.select(".cover a img[src]"):
            image = ImageItem()
            image['mid'] = mid
            image['img_url'] = item['src']

            yield image

    # Feed tag info into the item pipeline.
    def get_target(self, response):
        """Yield a TargetItem for each genre/region tag of a movie."""
        content = json.loads(response.text)
        info = content['subject']

        mid = response.meta['mid']
        type_names = info['types']
        country_names = info['region']
        # 'region' may be a single string or a list; normalise to a list.
        # isinstance() is the idiomatic type check (was: type(...) == list).
        if isinstance(country_names, list):
            target_names = type_names + country_names
        else:
            target_names = type_names + [country_names]

        for name in target_names:
            target = TargetItem()
            target['mid'] = mid
            target['name'] = name

            yield target

    # Feed movie quotes into the item pipeline.
    def get_lines(self, response):
        """Yield at most five LinesItem quotes scraped from the search page."""
        soup = BeautifulSoup(response.text, 'html.parser')

        mid = response.meta['mid']
        # Keep only the first five quote sections (was an enumerate/break
        # loop; slicing states the intent directly).
        for item in soup.select("section")[:5]:
            lines = LinesItem()
            lines['mid'] = mid
            lines['line'] = item.text

            yield lines
