# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

from scrapy import Item,Field

class WeiboItem(Item):
    """A single Weibo post scraped under a topic, with author profile info flattened in.

    Note: no fixed ``collection`` attribute is declared here on purpose —
    scraped items are stored in a collection named after the topic, so posts
    from different topics stay separated for later processing (the pipeline
    is expected to read ``topic`` to decide where to store each item —
    TODO confirm against the pipeline implementation).
    """

    # --- Post fields ---
    topic = Field()          # topic the post was scraped under
    wid = Field()            # post (weibo) id
    user = Field()           # author's display name
    uid = Field()            # author's user id
    content = Field()        # post text content
    forward_count = Field()  # number of forwards/reposts
    comment_count = Field()  # number of comments
    like_count = Field()     # number of likes
    pic_num = Field()        # number of attached pictures
    posted_at = Field()      # publish time
    is_weekend = Field()     # whether the publish time falls on a weekend
    crawled_at = Field()     # crawl timestamp

    # --- Author profile fields (live attributes of WeiboItem, not a
    # separate user item — they describe the post's author) ---
    gender = Field()             # author gender
    description = Field()        # author bio/description
    fans_count = Field()         # follower count
    follows_count = Field()      # number of users the author follows
    weibos_count = Field()       # number of posts the author has published
    verified = Field()           # verified-account badge flag (True = verified; False = not)
    verified_reason = Field()    # verification reason text, e.g. "comedy blogger / headline author"
    verified_type = Field()      # verification type code: 0 (3 = organization)
    verified_type_ext = Field()  # extended verification type code
