# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql
from urllib import request
import os


class LofterPipeline:
    """Scrapy item pipeline: downloads each item's photos to a local
    directory and records the (title, joined photo links) pair in MySQL.
    """

    # Root directory under which one sub-directory per item title is created.
    IMAGE_ROOT = 'f:/work/lofter/'

    def process_item(self, item, spider):
        """Persist one scraped item.

        Downloads the item's photos (skipped when the title directory
        already exists, i.e. the item was seen before) and inserts a row
        into ``photo_links``.  Returns the item unchanged so later
        pipelines can run.
        """
        title = item['title']
        photo_link = item['photo_link']
        self._download_images(title, photo_link)
        self._save_record(title, photo_link)
        return item

    def _download_images(self, title, photo_link):
        """Fetch every photo URL into <IMAGE_ROOT>/<title>/<index>.jpg.

        The directory's existence doubles as an "already downloaded"
        marker, so nothing is re-fetched for duplicate items.
        """
        dirname = os.path.join(self.IMAGE_ROOT, title)
        if os.path.exists(dirname):
            return
        # makedirs also creates missing parent directories
        # (os.mkdir would raise FileNotFoundError if the root is absent).
        os.makedirs(dirname)
        for number, url in enumerate(photo_link):
            localfile = os.path.join(dirname, str(number) + '.jpg')
            request.urlretrieve(url, localfile)

    def _save_record(self, title, photo_link):
        """Insert the title and '+'-joined photo links into MySQL.

        Uses a parameterized query — the original interpolated the values
        directly into the SQL string, which breaks on any quote character
        in a title/link and is a SQL-injection risk.  Commits explicitly,
        because pymysql disables autocommit by default, so the original
        ``conn.query(sql)`` silently discarded the insert.
        """
        conn = pymysql.connect(host='127.0.0.1', user='root',
                               password='root', database='lofter')
        try:
            with conn.cursor() as cursor:
                cursor.execute(
                    "insert into photo_links(title, photolink) values(%s, %s)",
                    (title, '+'.join(photo_link)),
                )
            conn.commit()
        finally:
            # Close the connection even if the insert fails; the original
            # leaked it on any exception.
            conn.close()