# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

from scrapy import Item, Field


class UserItem(Item):
    """Profile data scraped for a single Weibo user."""

    # Name of the collection this item is persisted into.
    collection = 'users'

    # Unique user id.
    id = Field()
    # Display name.
    name = Field()
    # Avatar image URL (small size).
    avatar = Field()
    # Profile cover / background image URL.
    cover = Field()
    # Gender.
    gender = Field()
    # Short self-description.
    description = Field()
    # Follower count.
    fans_count = Field()
    # Count of users this user follows.
    follows_count = Field()
    # Number of weibos posted.
    weibos_count = Field()
    # Verified ("V") badge flag.
    verified = Field()
    # Verification reason text, e.g. a short bio line.
    verified_reason = Field()
    # Verification type code, e.g. 0.
    verified_type = Field()
    # Follow list.
    follows = Field()
    # Fan list.
    fans = Field()
    # Timestamp at which this item was crawled.
    crawled_at = Field()

# Follow list and fan list of a user.
class UserRelationItem(Item):
    """Relationship snapshot (follows / fans) for one user.

    Note it targets the same 'users' collection as ``UserItem``: relation
    items are merged into the matching user document downstream rather
    than stored separately.
    """

    collection = 'users'

    # Id of the user these relations belong to.
    id = Field()
    # Users this user follows.
    follows = Field()
    # This user's fans (followers).
    fans = Field()
'''
并不意味着会将关注和粉丝列表存到一个单独的Collection里。
后面会用Pipeline对各个Item进行合并处理，合并存储到用户的Collection里。
因此Item和Collection并不一定是完全对应。
'''

class WeiboItem(Item):
    """A single weibo (micro-blog) post."""

    # Name of the collection this item is persisted into.
    collection = 'weibos'

    # Unique weibo id.
    id = Field()
    # Like ("attitude") count.
    attitudes_count = Field()
    # Comment count.
    comments_count = Field()
    # Repost count.
    reposts_count = Field()
    # Original (full-size) picture URL.
    picture = Field()
    # All attached pictures, when present: a list of dicts.
    pictures = Field()
    # Client/source the weibo was posted from.
    source = Field()
    # Text content of the weibo.
    text = Field()
    # Thumbnail URL — appears to be the thumb-sized variant of the picture.
    thumbnail = Field()

    # Author of the weibo.
    user = Field()
    # Creation time of the weibo.
    created_at = Field()
    # Timestamp at which this item was crawled.
    crawled_at = Field()


