# -*- coding: utf-8 -*-
import scrapy
import json
from weibo_scrapy.items import UserItem

class UinfoSpider(scrapy.Spider):
    """Fetch weibo user profile info via m.weibo.cn's getIndex API.

    Reads one uid per line from '问卷.txt', requests each user's profile
    container, and yields a UserItem per successfully parsed profile.
    Uids whose API response is missing/not-ok are appended to
    'error_ids.txt' for later retry.
    """
    name = 'uinfo'
    allowed_domains = ['m.weibo.cn']

    def start_requests(self):
        # Explicit encoding: the default is locale-dependent (e.g. cp936 on
        # Chinese Windows), and the uid list is expected to be UTF-8 text.
        with open('问卷.txt', 'r', encoding='utf-8') as fp:
            uids = [uid.strip() for uid in fp]
        for uid in uids:
            if uid:  # skip blank lines
                url = 'https://m.weibo.cn/api/container/getIndex?uid={uid}&luicode=10000011&lfid=231018{uid}_-_longbloglist_original&type=uid&value={uid}&containerid=100505{uid}'.format(uid=uid)
                # No explicit callback: Scrapy routes the response to parse().
                yield scrapy.Request(url=url, meta={'uid': uid})

    def parse(self, response):
        """Parse a getIndex JSON response; yield a UserItem or record the failing uid."""
        r_data = json.loads(response.text)
        ok = r_data.get('ok', '')
        data = r_data.get('data', '')
        if not ok or not data:
            # API signalled an error or returned an empty payload:
            # log the uid so the crawl can be re-run for it later.
            error_id = response.meta['uid']
            with open('error_ids.txt', 'a', encoding='utf-8') as fp:
                fp.write(error_id + '\n')
            return
        userInfo = data.get('userInfo', '')
        if userInfo:
            item = user_deal(userInfo)
            yield UserItem(item)


def user_deal(user):
    """Flatten a raw weibo ``userInfo`` dict into the item field dict.

    All fields except ``verified_reason`` are required and raise KeyError
    when absent; ``verified_reason`` defaults to '' because unverified
    accounts do not carry it.
    """
    # (item key, source key) pairs, in output order. Note the one rename:
    # the API's 'id' becomes our 'uid'.
    field_map = (
        ('uid', 'id'),
        ('screen_name', 'screen_name'),        # display name
        ('profile_url', 'profile_url'),        # profile page URL
        ('description', 'description'),        # bio text
        ('follow_count', 'follow_count'),      # accounts followed
        ('followers_count', 'followers_count'),  # follower count
        ('statuses_count', 'statuses_count'),  # number of posts
        ('gender', 'gender'),
        ('verified', 'verified'),              # verification flag
        ('verified_type', 'verified_type'),
    )
    item = {dest: user[src] for dest, src in field_map}
    # Optional on unverified accounts — default to empty string.
    item['verified_reason'] = user.get('verified_reason', '')
    item['urank'] = user['urank']    # weibo user level
    item['mbrank'] = user['mbrank']  # weibo membership (VIP) level
    return item