# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
from scrapy.conf import settings
from scrapy import log
import pymysql
from wb.items import ProfileItem, FollowingItem, FollowedItem, TweetsItem
class WbPipeline(object):
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to persist here; hand the item to the next pipeline stage.
        return item

class MongoDBPipeline(object):
    """Persist scraped weibo items into MongoDB, one collection per item type.

    Connection parameters and collection names are read from the project
    settings (MONGODB_SERVER, MONGODB_PORT, MONGODB_DB, INFO, FOLLOWING,
    FOLLOWED, TWEETS).
    """

    def __init__(self):
        # MongoClient is lazy: no network connection happens until the
        # first actual operation, so constructing the pipeline is cheap.
        connection = MongoClient(
            host=settings['MONGODB_SERVER'],
            port=settings['MONGODB_PORT']
        )
        db = connection[settings['MONGODB_DB']]
        self.info = db[settings['INFO']]
        self.following = db[settings['FOLLOWING']]
        self.followed = db[settings['FOLLOWED']]
        self.tweets = db[settings['TWEETS']]

    def process_item(self, item, spider):
        """Route the item to its collection by concrete type and return it.

        Unknown item types pass through untouched so later pipeline
        stages still receive them.
        """
        # Collection.insert() was removed in pymongo 4; insert_one() is
        # the supported single-document insert.
        if isinstance(item, ProfileItem):
            self.info.insert_one(dict(item))
        elif isinstance(item, FollowingItem):
            self.following.insert_one(dict(item))
        elif isinstance(item, FollowedItem):
            self.followed.insert_one(dict(item))
        elif isinstance(item, TweetsItem):
            self.tweets.insert_one(dict(item))
        # scrapy.log / log.msg was removed in Scrapy 1.x; the per-spider
        # logger is the supported replacement.
        spider.logger.debug("Weibo  added to MongoDB database!")
        return item
# class MysqlPipeline(object):
#     def __init__(self):
#         self.con=pymysql.connect(
#     host='localhost',   # local host, 127.0.0.1
#     port=3306,          # MySQL default port
#     user='root',        # MySQL superuser
#     passwd='chenbo01',  # root user password
#     db='weibo',         # database name
#     charset='utf8'
# )
#     def process_item(self,item,spider):
#         pass
# # create a cursor
# cur=conn.cursor()
# # create a user table; execute() runs the SQL statement
# cur.execute("create table user1(username varchar(20),password varchar(30))")
# # insert one row
# cur.execute("insert into user values('cb','cb')")
# # run a query
# a=cur.execute("select * from user")
# # fetch results -- single row: fetchone(); multiple rows: fetchmany()
# info=cur.fetchmany(a)
# # print the rows
# for s in info:
#     print(s)

# # close the cursor
# cur.close()
# # commit the transaction -- without it the database is not modified
# conn.commit()
# # close the connection
# conn.close()
#     def process_item(self, item, spider):
    
# NOTE(review): dead reference copy of the item definitions from wb/items.py,
# parked inside a string literal so it never executes. The live classes are
# imported at the top of this file; keep items.py as the source of truth.
'''
class ProfileItem(scrapy.Item):
    """
    账号的微博数、关注数、粉丝数及详情
    """
    _id=Field()
    nick_name=Field()
    profile_pic=Field()
    tweet_stats=Field()
    following_stats=Field()
    follower_stats=Field()
    sex=Field()
    location=Field()
    birthday=Field()
    bio=Field()

class FollowingItem(scrapy.Item):
    """
    关注的微博账号
    """
    _id=Field()
    relationship=Field()

class FollowedItem(scrapy.Item):
    """
    粉丝的微博账号
    """
    _id = Field()
    relationship = Field()

class TweetsItem(scrapy.Item):
    _id=Field()
    Content=Field()
    ID=Field()
    Co_oridinates=Field()
    Like=Field()
    Transfer=Field()
    Comment=Field()
    PubTime=Field()
    Tools=Field()
'''
