# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql
from douyin_pro import settings
import oss2
import datetime, time
from pymysql.converters import escape_string

class DouyinProPipeline:
    """Scrapy pipeline that de-duplicates douyin videos by ``aweme_id``,
    uploads the local cover/video files to Aliyun OSS, and inserts a row
    into the ``ufutx_anchor_videos`` MySQL table for each new video.
    """

    # Connection/cursor are created in open_spider and torn down in
    # close_spider (Scrapy lifecycle hooks).
    conn = None
    cursor = None  # was misspelled "corsor" and never used under that name

    def open_spider(self, spider):
        """Open the MySQL connection when the spider starts."""
        print("开始插入数据库")
        self.conn = pymysql.connect(host=settings.DB_HOST, database=settings.DB_DATABASE,
                                    user=settings.DB_USER, password=settings.DB_PASSWORD)
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        """Insert *item* into MySQL unless its ``aweme_id`` already exists.

        For a new video, the local cover and video file are first uploaded
        to OSS and their public URLs stored in the row. On any failure the
        transaction is rolled back and the item is still passed through.
        """
        cursor = self.cursor
        print(item)
        try:
            # Existence check only — no need to fetch whole rows.
            # Parameterized query: pymysql quotes/escapes the value, which
            # fixes the SQL-injection hole in the old .format() version.
            select_sql = "SELECT 1 FROM ufutx_anchor_videos WHERE aweme_id = %s LIMIT 1"
            cursor.execute(select_sql, (item['aweme_id'],))
            if cursor.fetchone() is None:
                cover = self.upload_file(item['local_cover'])
                link = self.upload_file(item['local_link'])
                now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                # All values go through placeholders; manual escape_string
                # on title is no longer needed.
                insert_sql = ("insert into ufutx_anchor_videos"
                              "(m_id, title, account, link, aweme_id, cover, video_url,"
                              " file_path, channel, created_at, updated_at) "
                              "values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
                params = (item['m_id'], item['title'], item['account'], item['link'],
                          item['aweme_id'], cover, link, item['local_link'],
                          item['channel'], now, now)
                print(insert_sql)
                cursor.execute(insert_sql, params)
                self.conn.commit()
        except Exception as e:
            # Best-effort pipeline: log and roll back, never drop the item.
            print('插入数据库错误')
            print(e)
            self.conn.rollback()
        return item

    def close_spider(self, spider):
        """Release the cursor and connection when the spider finishes."""
        self.cursor.close()
        self.conn.close()
        print("插入数据库结束")

    def upload_file(self, file):
        """Upload *file* to Aliyun OSS and return its public URL.

        The object key is ``<YYYYMM>/<DD>/<basename>`` based on local time,
        e.g. ``202401/15/video.mp4``.
        """
        auth = oss2.Auth(settings.ALIOSS_ID, settings.ALIOSS_SECRET)
        bucket = oss2.Bucket(auth, settings.ALIOSS_Endpoint, settings.ALIOSS_BUCKET)

        now = time.localtime()
        # [-1] instead of [1]: also works for a bare filename with no '/',
        # and is identical when a slash is present.
        file_name = file.rsplit('/', 1)[-1]
        # "object" renamed: it shadowed the builtin.
        object_key = time.strftime("%Y%m", now) + "/" + time.strftime("%d", now) + "/" + file_name
        bucket.put_object_from_file(object_key, file)
        return settings.ALIOSS_HOST + '/' + object_key
