# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
import datetime


class MycwpjtPipeline(object):
    """Scrapy item pipeline that normalizes scraped article fields and
    (optionally) persists them to a MySQL table.

    The actual INSERT is currently disabled (kept as a commented reference
    in ``process_item``); the pipeline still normalizes every optional
    field to the literal string 'None' because the target table's columns
    are NOT NULL.
    """

    def __init__(self):
        # Open the database connection.  On failure, log the error and
        # leave self.conn as None so close_spider() does not crash with
        # AttributeError (the original never set the attribute on failure).
        # SECURITY NOTE: credentials are hard-coded here — move them to
        # Scrapy settings or environment variables before deploying.
        self.conn = None
        try:
            self.conn = pymysql.connect(host="rm-bp151dcc75aycqd80to.mysql.rds.aliyuncs.com", user="root", passwd="Iscas123",db="knowledgeproject", port=3306, charset='utf8')
        except Exception as e:
            print("Error!:{0}".format(e))

    @staticmethod
    def _as_text(value):
        """Return *value* unchanged, or the string 'None' when it is None.

        Some pages lack a scraped field (yielding None), but the database
        columns require a value, so missing fields are stored as 'None'.
        """
        return "None" if value is None else value

    def process_item(self, item, spider):
        """Normalize the scraped item's fields and return the item.

        Every optional field is coerced to a non-None string so it could be
        inserted into NOT NULL varchar columns.  Returns the item so any
        later pipelines receive it (Scrapy convention; the original
        returned None).
        """
        title = self._as_text(item["title"])
        publish_time = item["publish_time"].strip()
        url = item["url"]
        origin = self._as_text(item["origin"])
        article_type = self._as_text(item["type"])  # renamed: don't shadow builtin `type`
        if item["author"] is not None:
            author = item["author"].strip()
        else:
            author = "None"

        strs = item["content"]
        if strs is not None:
            content = ''.join(str(i) for i in strs)
            # Page number 474 has a malformed body (stray quotes) that broke
            # the original string-built INSERT, so its content is discarded.
            # BUG FIX: the original `url.find(474)` raised TypeError
            # (str.find needs a str), and find() returns -1 (truthy) when the
            # substring is absent — membership test expresses the intent.
            if "474" in url:
                content = "None"
        else:
            content = "None"

        picture_loc = self._as_text(item["picture_loc"])
        print(content)

        # Current timestamp formatted for MySQL DATETIME columns (only used
        # by the disabled INSERT below).
        dts = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        # The INSERT is intentionally disabled.  If re-enabled, use a
        # parameterized query as shown — never string concatenation, which
        # is vulnerable to SQL injection and broke on quoted content:
        #
        # sql = ("insert into cra_technique_article(title, publish_time, origin, "
        #        "source, url, type, author, abstract, content, picture_loc, "
        #        "video_url, video_loc, create_at, update_at, delete_at, "
        #        "is_deleted, mark) "
        #        "values(%s, %s, %s, 'None', %s, %s, %s, 'None', %s, %s, "
        #        "'None', 'None', %s, %s, %s, 0, 'None')")
        # try:
        #     with self.conn.cursor() as cur:
        #         cur.execute(sql, (title, publish_time, origin, url,
        #                           article_type, author, content, picture_loc,
        #                           dts, dts, dts))
        #     self.conn.commit()
        # except Exception as e:
        #     self.conn.rollback()  # roll back on error and log it
        #     print("Error!:{0}".format(e))

        return item

    def close_spider(self, spider):
        """Close the database connection when the spider finishes.

        Guarded because __init__ leaves self.conn as None when the
        connection attempt failed.
        """
        if self.conn is not None:
            self.conn.close()
