# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

import MySQLdb
import pymysql
import csv
import os.path
import os
from pathlib import Path
import scrapy
import time

#得把items导入进来
from spider.spider.items import MovieItem, MovieCommentItem, MoviePlayUrlItem, DetailItem, MovieImage, UserHead
#！！！！里面的方法收尾记得一定要 return item！！！


###########      将数据导入mysql，里面具体数据还没改，作为参照      ##############
#pymysql虽然是非正式版，但是与mysqlclient除了包不一样，其他的都一样
# 导入sql即可像下面一样直接写入，也可以用item合成字符串
# 最便捷的做法是在item中建立一个类，将合成字符串的方法封装成两个方法，各个item则继承这个类。

'''
储存到mysql比较笨的办法：
class LearnscrapyPipeline:
    def process_item(self, item, spider):

        conn = MySQLdb.Connect(user="root", password="rock1204", host="127.0.0.1", port=3306, database="xpc", charset="utf8mb4")
        cursor = conn.cursor()
        if isinstance(item, MovieItem):
            cursor.execute('insert into db_movie(episodes_info, rate, cover_x, title, url, playable, cover, id, cover_y, is_new) VALUES ("%s", "%s", %d, "%s", "%s", "%s", "%s", "%s", %d, "%s");' % (
                item.get("episodes_info"), item.get("rate"), item.get("cover_x"), item.get("title"), item.get("url"), item.get("playable"), item.get("cover"), item.get("id"), item.get("cover_y"), item.get("is_new")
            ))
        elif isinstance(item, MovieCommentItem):
            cursor.execute('insert into db_movie_comment(content, movie_id, star) VALUES ("%s", "%s", "%s");' % (item.get("content"), item.get("movie_id"), item.get("star")))
        conn.commit()
        cursor.close()
        conn.close()
        return item
        
好一点的办法，拼接字符串：
class WZRYPipeline:
    def process_item(self, item, spider):
        conn = pymysql.Connect(user="root", password="rock1204", host="127.0.0.1", port=3306, charset="utf8mb4", database="xpc")
        cursor = conn.cursor()
        
        sql = "insert into " + item.table_name + "(" + ",".join(item.keys()) + ") VALUES (" + ",".join(["%s"] * len(item.keys())) + ");"
        args = tuple([item.get(key) for key in item.keys()])

        if isinstance(item, HeroItem):
             cursor.execute(item.to_sql(), args=item.get_args())
        elif isinstance(item, HeroSkillItem):
             cursor.execute(item.to_sql(), args=item.get_args())
        elif isinstance(item, HeroSkinItem):
             cursor.execute(item, args=item.get_args())
        
        conn.commit()
        cursor.close()
        conn.close()
        return item

最好的方法，增加item都不用改代码了：
'''
###### 将数据保存到sql ######
###### 将数据保存到sql ######
class SqlPipeline:
    """Persist scraped items to MySQL.

    Each item class supplies its own ``to_sql()`` / ``get_args()`` pair
    (defined in items.py), so adding a new item type needs no change here.
    utf8mb4 is used so 4-byte characters such as emoji can be stored.

    NOTE(review): a connection is opened per item, which is slow for large
    crawls — consider open_spider/close_spider for a long-lived connection.
    """

    def process_item(self, item, spider):
        # Image items are written to disk by ImgPipeline; skip MySQL and
        # avoid opening a pointless connection for them.
        if isinstance(item, (MovieImage, UserHead)):
            return item

        conn = pymysql.Connect(user="root", password="", host="127.0.0.1",
                               port=3306, charset="utf8mb4", database="xpc")
        try:
            # pymysql cursors are context managers: closed automatically.
            with conn.cursor() as cursor:
                cursor.execute(item.to_sql(), args=item.get_args())
            conn.commit()
        finally:
            conn.close()  # always release the connection, even on error
        return item

###########    进行数据的csv保存    ##############
class SpiderPipeline:
    """Append scraped items to per-type CSV files.

    Each supported item class maps to a (filename, ordered column names)
    pair; files are opened in append mode so repeated runs accumulate rows.
    Unrecognized item types pass through untouched.
    """

    # item class -> (csv filename, column order) — order matches the
    # original hand-written writerow tuples exactly.
    _CSV_MAP = {
        MovieItem: ("movies.csv",
                    ("movie_title", "movie_desc", "movie_id",
                     "movie_duration", "movie_publish", "movie_play",
                     "movie_like", "image_url")),
        DetailItem: ("detail.csv", ("movie_id", "transmit")),
        MovieCommentItem: ("comments.csv",
                           ("movie_id", "content", "user_id", "followee",
                            "follower", "collected", "user_like",
                            "user_headurl")),
        MoviePlayUrlItem: ("playurls.csv", ("movie_id", "profile", "url")),
    }

    def process_item(self, item, spider):
        for item_cls, (filename, fields) in self._CSV_MAP.items():
            if isinstance(item, item_cls):
                # utf-8-sig adds a BOM so Excel opens the file correctly;
                # newline='' prevents blank rows on Windows.
                with open(filename, "a", newline='',
                          encoding="utf-8-sig") as file:
                    csv.writer(file).writerow(
                        tuple(item.get(field) for field in fields))
                break
        return item

class ImgPipeline:
    """Save downloaded image bytes to local folders.

    When scrapy requests an image URL, the response body is the raw image
    bytes (``response.text`` cannot decode it; ``response.body`` returns
    ``b'...'``), so we simply write ``response.body`` to a ``.jpg`` file
    named after the movie/user id.
    """

    PIC_DIR = Path('./视频标题图')    # movie cover images
    HEAD_DIR = Path('./用户头像图')   # user avatar images

    def process_item(self, item, spider):
        if isinstance(item, MovieImage):
            self._save(self.PIC_DIR, item["movie_id"], item["image"])
        elif isinstance(item, UserHead):
            self._save(self.HEAD_DIR, item["user_id"], item["user_head"])
        return item

    @staticmethod
    def _save(folder, name, response):
        """Write response.body to folder/<name>.jpg, creating the folder."""
        # exist_ok avoids the race-prone "check then mkdir" pattern, and the
        # folder is only created when an item of that type actually arrives.
        folder.mkdir(exist_ok=True)
        (folder / f"{name}.jpg").write_bytes(response.body)

#还有一个储存图片的方法就是通过ImagesPipeline
#在spider的主文件夹中只要将图片的url传输给item就可以进行下载
#在这里只要有列表页就可以进行下载了，无需再进一步操作，可以省掉两个item，两个spider
#当然还得在settings中添加对应的设置
    # 图片存储位置设定：千万别写错，写错了这个pipline不启动
        # IMAGES_STORE = 'D:\13488\Documents\PycharmProjects\spider\ImageSpider'，这时下载的文件名是自动填的
    #要重命名，指定不同的文件夹还需要重写file_path
from scrapy.pipelines.images import ImagesPipeline
import scrapy
import re
class LearnImgPipeline(ImagesPipeline):
    """Download images via scrapy's built-in ImagesPipeline.

    Requires IMAGES_STORE in settings (the pipeline silently does not start
    if it is missing). ``file_path`` is overridden to sort downloads into
    per-type sub-folders named by movie/user id.
    """

    def get_media_requests(self, item, info):
        # Movie cover: pass movie_id through meta so file_path can name it.
        if isinstance(item, MovieItem):
            yield scrapy.Request(item.get("image_url"),
                                 meta={"movie_id": item.get("movie_id")},
                                 dont_filter=True)
        # User avatar: same trick with user_id.
        if isinstance(item, MovieCommentItem):
            yield scrapy.Request(item["user_headurl"],
                                 meta={"user_id": item["user_id"]},
                                 dont_filter=True)

    def file_path(self, request, response=None, info=None, *, item=None):
        # .get() instead of [] — each request carries only ONE of the two
        # meta keys, so direct indexing raised KeyError for avatar requests.
        movie_id = request.meta.get("movie_id")
        user_id = request.meta.get("user_id")
        # The ids are numeric, so no re.sub() filtering of the Windows
        # forbidden characters \ / : * ? " < > | is needed here.
        if movie_id:
            return '{0}/{1}'.format('视频封面图', movie_id)
        if user_id:
            # BUG FIX: original read meta["movie_id"] here (copy-paste),
            # which broke avatar file naming; use the avatar's user_id.
            return '{0}/{1}'.format('用户头像', user_id)
        # Fallback: name by the URL tail so the return value is never
        # unbound (original raised UnboundLocalError in this case).
        return request.url.split('/')[-1]

