import scrapy
import pymysql
# ImagesPipeline provides the built-in download machinery; page crawling
# and image downloading must be kept as separate pipelines.
from scrapy.pipelines.images import ImagesPipeline
class SpidersqlPipeline(object):
    """Persist scraped job items into the local MySQL ``boss`` database.

    One connection is opened for the pipeline's lifetime; each item is
    inserted as a row of the ``info`` table and committed immediately.
    """

    def __init__(self):
        # BUG FIX: positional arguments to pymysql.connect() were removed
        # in PyMySQL 1.0 — keyword arguments are required. utf8mb4 avoids
        # mojibake for the Chinese text this spider scrapes.
        self.connect = pymysql.connect(
            host="localhost",
            user="root",
            password="root",
            database="boss",
            charset="utf8mb4",
        )
        self.cursor = self.connect.cursor()

    def process_item(self, item, spider):
        """Insert one job posting and commit.

        Rolls back on a MySQL error so one bad row does not leave the
        connection in a broken transaction, then re-raises.
        """
        try:
            self.cursor.execute(
                """insert into info(work,pos,com,salary,edu,src) values (%s,%s,%s,%s,%s,%s)""",
                (item['work'], item['pos'], item['com'],
                 item['salary'], item['edu'], item['src']),
            )
            self.connect.commit()
        except pymysql.MySQLError:
            self.connect.rollback()
            raise
        return item

    def close_spider(self, spider):
        # Release DB resources when the spider finishes — the original
        # leaked both the cursor and the connection.
        self.cursor.close()
        self.connect.close()

class SpiderPipeline(ImagesPipeline):
    """Download each item's image via Scrapy's ImagesPipeline.

    The image URL comes from ``item['src']``; the stored file is named
    after the company (``item['com']``). Items whose download failed are
    appended to ``img_error.txt`` for later inspection.
    """

    def get_media_requests(self, item, info):
        """Yield one download request per item, carrying the company
        name in request.meta so file_path() can build the filename."""
        name = item['com']
        print("================" + name)
        yield scrapy.Request(url=item['src'], meta={'name': name, 'type': 1})

    def file_path(self, request, response=None, info=None, *, item=None):
        """Store each image as ``<company>.png``.

        ``item`` is accepted keyword-only for compatibility with newer
        Scrapy versions that pass it; it is not needed here.
        """
        return request.meta['name'] + '.png'

    def item_completed(self, results, item, info):
        """Record a failed download, then always pass the item through.

        BUG FIX: the original ``open('img_error.txt', 'a', "utf-8")``
        passed "utf-8" as the ``buffering`` positional parameter (an
        int), raising TypeError — it must be ``encoding="utf-8"``.
        Also guard against an empty ``results`` list, which would have
        raised IndexError on ``results[0]``.
        """
        if not results or not results[0][0]:
            with open('img_error.txt', 'a', encoding='utf-8') as f:
                f.write(str(item['com'] + '---' + item['src']))
                f.write('\n')
        return item


