# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.pipelines.images import ImagesPipeline
from urllib import request
from urllib.parse import quote
import scrapy
import os
import string
import random
import pymysql

from caomei.settings import USER_AGENTS
from caomei.items import ImgItem


class CaomeiPipeline:
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform here; the specialized pipelines below
        # (download / DB persistence) do the real work.
        return item


class DownLoadImgPipeline(ImagesPipeline):
    """Download comic page images through Scrapy's built-in ImagesPipeline.

    Requires IMAGES_STORE to be configured in settings. Each image is
    saved under ``<IMAGES_STORE>/<chapter_dir>/<original file name>``.
    """

    def get_media_requests(self, item, info):
        """Yield one download request per item.

        The item is carried along in ``meta`` so that ``file_path`` can
        later recover the chapter directory and original file name.
        """
        img_link = item['img_link']
        yield scrapy.Request(url=img_link, meta={'item': item})

    def file_path(self, request, response=None, info=None):
        """Return the relative save path for a downloaded image.

        Without this override Scrapy names files by their content hash;
        here we keep the original file name from the URL, grouped by
        chapter directory.
        """
        item = request.meta['item']

        # The image file name (with suffix) is the last URL path segment.
        img_link = item['img_link']
        complete_img = img_link.split('/')[-1]

        chapter_dir = item['chapter_dir']
        return os.path.join(chapter_dir, complete_img)


class DownLoadPipeline:
    """Download each ImgItem's image with urllib and save it into the
    item's chapter directory. Items of other types pass through untouched.
    """

    def process_item(self, item, spider):
        if isinstance(item, ImgItem):
            img_link = item['img_link']
            chapter_dir = item['chapter_dir']

            # The URL may contain Chinese characters; percent-encode them
            # (leaving printable ASCII such as ':' and '/' intact) or the
            # request fails.
            image_link = quote(img_link, safe=string.printable)

            # Context manager guarantees the HTTP response is closed even
            # if read() raises (the old code leaked it).
            with request.urlopen(image_link) as img_file:
                byte = img_file.read()

            complete_img_name = image_link.split('/')[-1]  # file name, with suffix
            img_name = complete_img_name.split('.')[0]     # file name, without suffix

            print("img name", img_name, "download complete! size:", len(byte) / 1024, "kb")

            # 'wb' overwrites any stale or partial file from a previous
            # run; the previous append mode ('ab') would corrupt images on
            # re-crawl. os.path.join keeps the path portable — the old
            # code hard-coded a '\\' separator, which breaks off Windows.
            with open(os.path.join(chapter_dir, complete_img_name), 'wb') as write_file:
                write_file.write(byte)

            print("img name", img_name, "save to local!")

        return item


class DBPipeline:
    """Persist ImgItem records into the MySQL `hanman` table, skipping
    rows that were already stored by a previous crawl.
    """

    def __init__(self):
        # Connect to the local MySQL database.
        # NOTE(review): credentials are hard-coded here — consider moving
        # them into settings.py and reading them via the crawler settings.
        self.conn = pymysql.connect(
            host="127.0.0.1",
            port=3306,
            db="caomei",
            user="root",
            passwd="123456",
            charset="utf8",
            use_unicode=True
        )
        # All reads and writes go through this cursor.
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # Check whether this exact image record already exists.
        # Both queries are parameterized, which guards against SQL injection.
        select_hanman_count_sql = "SELECT `id` FROM `hanman` WHERE `comic_link` = %s AND `chapter_link` = %s AND `img_link` = %s"

        insert_hanman_sql = "INSERT INTO `caomei`.`hanman`(`comic_link`, `comic_name`, `chapter_link`, `chapter_name`, `img_link`, `img_name`, `img_sufix`, `create_time`) VALUES (%s, %s, %s, %s, %s, %s, %s, NOW());"

        current_cursor = self.cursor
        try:
            if isinstance(item, ImgItem):
                current_cursor.execute(select_hanman_count_sql, [item['comic_link'], item['chapter_link'], item['img_link']])
                exist_hanman_record = current_cursor.fetchone()

                # Insert only when the record is not present yet.
                if not exist_hanman_record:
                    # The last URL path segment is "<name>.<suffix>".
                    complete_img_info = item['img_link'].split("/")[-1]
                    img_name = complete_img_info.split(".")[0]
                    img_sufix = complete_img_info.split(".")[-1]
                    current_cursor.execute(insert_hanman_sql, [item['comic_link'], item['comic_name'], item['chapter_link'], item['chapter_name'], item['img_link'], img_name, img_sufix])

            self.conn.commit()
        except Exception as error:
            # Best-effort persistence: log the failure, roll back, and let
            # the item continue down the pipeline rather than crashing the
            # crawl.
            print("error", error)

            self.conn.rollback()

        return item

    def close_spider(self, spider):
        """Close the cursor and the connection when the spider finishes.

        Cursor and connection must be closed separately.
        """
        print("close db connect")

        self.cursor.close()
        self.conn.close()