# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html


import json
import codecs
import pymysql
from pymysql import connections
import time;  # 引入time模块


class CarnewsJsonPipeline(object):
    """Export each scraped item as one JSON object per line (JSON Lines).

    Items are appended to ``CarnewsJson.json`` with non-ASCII characters
    kept readable (``ensure_ascii=False``), so Chinese text stays legible.
    """

    def __init__(self):
        # codecs.open lets us pin the encoding to UTF-8 and avoid
        # platform-default encoding problems when writing Chinese text.
        self.file = codecs.open("CarnewsJson.json", "w", encoding="utf-8")

    def process_item(self, item, spider):
        """Serialize *item* as one JSON line and write it to the file."""
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        # Return the item so the next pipeline in ITEM_PIPELINES receives it.
        return item

    def close_spider(self, spider):
        # BUG FIX: this hook was misspelled ``close_spier``, so Scrapy
        # never invoked it and the file was never flushed/closed.
        self.file.close()

# MySQL storage pipeline
class CarnewsMysqlPipeline(object):
    """Persist scraped car-news items into the MySQL ``ll_article`` table."""

    # Category name -> ll_cid; any unknown category falls back to 9.
    # (Replaces the previous 7-branch if/elif chain with a dict lookup.)
    CATEGORY_IDS = {
        '新闻': 3,
        '导购': 4,
        '试驾评测': 5,
        '用车': 6,
        '文化': 7,
        '技术': 8,
    }

    def __init__(self):
        # NOTE(review): credentials are hard-coded; consider moving them
        # into Scrapy settings. Charset is left at the server default to
        # preserve existing behavior.
        self.conn = pymysql.connect(host='127.0.0.1', user='root',
                                    passwd='123456', db='ll_carnews')
        self.cursor = self.conn.cursor()

    @classmethod
    def _category_id(cls, catname):
        """Return the numeric ``ll_cid`` for a category name (9 if unknown)."""
        return cls.CATEGORY_IDS.get(catname, 9)

    def process_item(self, item, spider):
        """Insert one item and commit; the query is parameterized, so the
        driver escapes all values (no SQL injection via item fields)."""
        sql = ("insert into ll_article(ll_title,ll_time,ll_pics,ll_cid,ll_url)"
               " VALUES(%s,%s,%s,%s,%s)")
        self.cursor.execute(sql, (
            item['ll_title'],
            int(time.time()),  # insertion time as a unix timestamp
            item['ll_pics'],
            self._category_id(item['catname']),
            item['ll_url'],
        ))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        # BUG FIX: close the cursor as well — previously only the
        # connection was closed, leaking the cursor.
        self.cursor.close()
        self.conn.close()