# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pandas
import pymysql
import csv
import pandas as pd



# Pipeline that saves scraped items to a MySQL database
class LjPipeline(object):
    """Persist scraped Lianjia items into the MySQL table `lianjia`.

    One connection/cursor pair is opened per spider run and closed when
    the spider finishes; each item is inserted and committed individually.
    """

    # Open the database connection when the spider starts.
    def open_spider(self, spider):
        self.db = pymysql.connect(host='127.0.0.1', port=3306,
                                  user='root', password='123456',
                                  database='tencent', charset='utf8')
        self.cursor = self.db.cursor()

    # Insert one item; returns the item so later pipelines receive it.
    def process_item(self, item, spider):
        print(item)
        name = item.get('name')
        content = item.get('content')
        price = item.get('price')
        longitude = item.get('longitude')
        latitude = item.get('latitude')
        # Parameterized query: the driver escapes the values, which
        # prevents SQL injection and handles quotes/None correctly
        # (the old %r string interpolation did neither).
        sql = ('insert into lianjia(name,content,price,longitude,latitude) '
               'values(%s,%s,%s,%s,%s)')
        self.cursor.execute(sql, (name, content, price, longitude, latitude))
        self.db.commit()
        return item

    # Release the cursor and connection when the spider closes.
    def close_spider(self, spider):
        self.cursor.close()
        self.db.close()


# Pipeline that saves scraped items to a CSV file
class SaveCsv(object):
    """Append each scraped item as one row to ../lj_info.csv via pandas."""

    # Column order written to the CSV file.
    _COLUMNS = ['name', 'content', 'price', 'longitude', 'latitude']

    def process_item(self, item, spider):
        """Write *item* to the CSV file and pass it down the pipeline.

        Returning the item is required by the Scrapy pipeline contract:
        the original version returned None, which starved every pipeline
        registered after this one of the item.
        """
        df = pd.DataFrame(data=[item])
        # Append mode with no header so repeated calls accumulate rows.
        df.to_csv("../lj_info.csv", mode='a+', encoding='utf-8',
                  columns=self._COLUMNS, header=False)
        return item






