# -*- coding: utf-8 -*-
import scrapy
import os
import io
import time
import haul
from PIL import Image
from scrapy.log import logger
from scrapy import signals
from jparser import PageModel
from scrapy.spiders import Spider
from tools.tools import url_join, get_file_size, gen_uuid, md5
from shijue_auto_crawler.items import WorkerItem
from scrapy.utils.log import configure_logging
from MongoConn import MongoConn
from config import LOG_LEVEL, LOG_PATH,MAX_COUNT, REDIS_SETTINGS
import redis

class WorkerSpider(Spider):
    """Spider that crawls article detail pages for one clue rule.

    For each page it extracts the article body/title with jparser and the
    image URLs with haul, then issues one follow-up request per image to
    read its dimensions with Pillow.  Completed ``WorkerItem``s are yielded
    from :meth:`img_parse`.

    Filtering state lives in two stores:
      * a per-clue MongoDB collection (``clue_id_<id>``) counts how often an
        image URL has already been stored (capped at ``MAX_COUNT``);
      * Redis caches the MD5 of image URLs known to be too small (<= 50x50)
        so they are not downloaded again for 72 hours.

    NOTE(review): ``from scrapy.log import logger`` (file header) only exists
    on old Scrapy releases — confirm the pinned Scrapy version.
    """
    name = 'worker'

    def __init__(self, rule):
        """Configure logging, copy task metadata from *rule*, and open the
        MongoDB and Redis connections used for image filtering.

        :param rule: object carrying ``task_id``, ``company_id``, ``clue_id``,
                     ``clue_name`` and ``start_urls`` for this crawl.
        """
        self.log_setting()
        self.task_id = rule.task_id
        self.company_id = rule.company_id
        self.clue_id = rule.clue_id
        self.clue_name = rule.clue_name
        self.start_urls = rule.start_urls
        # Wall-clock start, reported against end_time in spider_closed().
        self.start_time = time.time()

        super(WorkerSpider, self).__init__()
        # MongoDB connection; closed again in spider_closed().
        self.conn = MongoConn()
        self.db = self.conn.db
        # Redis connection caching small images and often-repeated images.
        self.redis_connector = redis.StrictRedis(
            host=REDIS_SETTINGS['HOST'],
            port=REDIS_SETTINGS['PORT'],
            db=REDIS_SETTINGS['CACHE_DB'],
            password=REDIS_SETTINGS['PASSWORD'],
        )

    def log_setting(self):
        """Route Scrapy logging into a per-day file under LOG_PATH."""
        current_date = time.strftime("%Y%m%d", time.localtime(time.time()))
        log_path = os.path.join(LOG_PATH, "worker_{}.log".format(current_date))
        configure_logging({'LOG_FILE': log_path, "LOG_LEVEL": LOG_LEVEL})

    def extract(self, response):
        """Parse one detail page.

        :param response: scrapy Response for an article page.
        :return: tuple ``(content, title, img_list)`` — *content* is the
                 article text with newlines/carriage-returns/tabs stripped,
                 *title* the jparser title, *img_list* the absolutized image
                 URLs whose path contains a known image extension.
        """
        detail_t1 = time.time()
        # Scheme + host of the page ("http://host"), used to absolutize
        # relative image URLs via url_join().
        base_url = '/'.join(response.url.split('/')[0:3])
        pm = PageModel(response.text)
        pmt1 = time.time()
        res = pm.extract()
        pmt2 = time.time()
        logger.debug('jparser 解析时间: {} s'.format(pmt2 - pmt1))

        # Article paragraphs come from jparser; images come from haul below.
        content_temp = [x['data'] for x in res['content'] if x['type'] == 'text']

        img_result = haul.find_images(response.text)
        img_list = []
        format_l = ('jpg', 'png', 'gif', 'tif', 'bmp', 'jpeg')
        for img in img_result.image_urls:
            # BUG FIX: the original appended once per matching extension and
            # 'jpg' is a substring of 'jpeg', so every ".jpeg" URL was added
            # twice.  Append at most once per URL.
            if any(fmt in img for fmt in format_l):
                img_list.append(url_join(base=base_url, url=img))

        # content: full article body as one string.
        content = ''.join(content_temp)
        # title: article title from jparser.
        title = res['title']
        detail_t2 = time.time()
        logger.debug('详情页解析时间:{}'.format(detail_t2 - detail_t1))
        # Strip layout whitespace from the article body.
        for key in ('\n', '\r', '\t'):
            content = content.replace(key, '')
        return content, title, img_list

    def parse(self, response):
        """Build a WorkerItem from the page, then yield one image request per
        image URL that survives the duplicate / frequency / size filters.

        :param response: scrapy Response for a start URL.
        :return: generator of scrapy.Request (callback: img_parse).
        """
        item = WorkerItem()
        content, title, img_list = self.extract(response)

        item['title'] = title
        item['content'] = content
        item['task_id'] = self.task_id
        item['clue_id'] = self.clue_id
        item['company_id'] = self.company_id
        item['clue_name'] = self.clue_name
        item['url'] = response.url
        item['is_cover'] = 0

        logger.debug('image list : {}'.format(img_list))
        # BUG FIX: the original duplicate check compared each URL against
        # img_list[img_list.index(image) - 1]; list.index() returns the FIRST
        # occurrence, so for the first element this read index -1 (wrapping to
        # the last element — the guarded IndexError could never fire) and
        # repeated URLs always compared against the same neighbour.  A seen-set
        # skips every duplicate URL on this page exactly once.
        seen = set()
        for image in img_list:
            if image in seen:
                continue
            seen.add(image)
            count = self.db["clue_id_{}".format(item['clue_id'])].find({'img_url': image}).count()
            # Skip images already stored MAX_COUNT times for this clue.
            if count >= MAX_COUNT:
                logger.info('过滤多次重复图片, 图片链接:{}'.format(image))
                continue
            # Skip images previously cached in Redis as too small.
            if self.cache_exist(image):
                logger.info('过滤尺寸过小的图片, 图片链接:{}'.format(image))
                continue

            yield scrapy.Request(url=image, callback=self.img_parse, meta={"item": item})

    def img_parse(self, response):
        """Measure a downloaded image and yield the completed WorkerItem.

        Images of 50x50 pixels or smaller are remembered in Redis (see
        cache_set) and dropped; unreadable bodies are logged and dropped.

        :param response: scrapy Response whose body is the raw image bytes.
        """
        picture_t1 = time.time()
        item = response.meta['item']
        # Extension after the last dot, with any query string removed.
        # (str.split never raises, so the original try/except was dead code.)
        img_format = response.url.split('.')[-1].split('?')[0]
        if len(img_format) > 5:
            # An unreasonably long "extension" means the URL has no real one.
            img_format = 'jpeg'
        try:
            # Encode the format as "1".."5"; unknown formats (including the
            # 'jpeg' fallback, which is absent from this list) map to "1".
            image_format = str(['jpg', 'png', 'gif', 'tif', 'bmp'].index(img_format) + 1)
        except ValueError:
            image_format = str(1)

        # Read the dimensions straight from the in-memory body — nothing is
        # written to disk.
        logger.debug("read image size start : {}".format(response.url))
        try:
            image = Image.open(io.BytesIO(response.body))
            x_size, y_size = image.size
        except Exception as e:
            logger.error('图片打开失败, 失败链接{}'.format(response.url))
            logger.error(e)
            return

        # Cache small images so parse() can skip them for the next 72 h.
        if x_size <= 50 and y_size <= 50:
            self.cache_set(str(response.url))
            return

        item['img_url'] = response.url
        item['width'] = x_size
        item['height'] = y_size
        item['img_path'] = ''
        item['img_format'] = image_format
        item['pic_size'] = int(x_size) * int(y_size)
        item['status'] = "1"
        logger.info("yield picture items, image_url :{}".format(item['img_url']))
        picture_t2 = time.time()
        logger.debug('单张图片解析时间: {} 图片链接:{}'.format(picture_t2 - picture_t1, response.url))
        yield item

    def cache_exist(self, pic_link):
        """Return truthy if *pic_link* (by MD5 of the URL) is cached in Redis
        as a known-small image."""
        _hash = md5(pic_link.encode())
        return self.redis_connector.exists(_hash)

    def cache_set(self, pic_link):
        """Cache the MD5 of *pic_link* in Redis for 72 hours, marking the
        image as too small to fetch again."""
        _hash = md5(pic_link.encode())
        self.redis_connector.setex(_hash, 3600 * 72, "1")

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Standard Scrapy hook: build the spider and connect spider_closed
        to the spider_closed signal so connections are cleaned up."""
        spider = super(WorkerSpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        """Signal handler: close the MongoDB connection and log total runtime."""
        self.conn.close()
        self.end_time = time.time()
        logger.info('本页面数据抓取共执行时间:{}'.format(str(self.end_time - self.start_time)))
        logger.info('Closing {} spider'.format(spider.name))




