# -*- coding: utf-8 -*-
import scrapy
import json
from math import ceil
import re
# Maps a Weibo container-id suffix -> school short name (Chinese).
# Each key is interpolated after the container prefix 2306570016 when
# building the getIndex API URL (see ZujiSpider.start_requests), and is
# matched back out of response.url in ZujiSpider.parse.
school_dic = {
    'B2094750DB6AABFB409D':'北航',
    'B2094757DB6FA2FD439B':'北理',
    'B2094654D069A0F94093':'农大',
}
# Cookie jar attached to every seed request.  SUB is presumably the Weibo
# session/login token — verify it is still valid before running.
# NOTE(review): hard-coded credential checked into source; consider
# loading it from settings or the environment instead.
COOKIES = {
    'SUB': '_2A2526KZODeRhGeNK71UX8ivPzj-IHXVSEsoGrDV6PUJbkdANLUjEkW1NSXaxzIm_1lQLWBkg52_vSRBXc8LZlx8M;',
}
class ZujiSpider(scrapy.Spider):
    """Crawl the Weibo mobile "getIndex" card-list API for each school
    container in ``school_dic`` and emit the user ids found on every page,
    following the API's own page counter until it stops returning ok=1.
    """
    name = 'zuji'
    allowed_domains = ['m.weibo.cn']

    def start_requests(self):
        """Yield one seed request per school container id.

        The session cookie (``COOKIES``) is attached only here; Scrapy's
        default cookie middleware persists it for the follow-up page
        requests issued from :meth:`parse`.
        """
        urls = ['https://m.weibo.cn/api/container/getIndex?containerid=2306570016{school_id}&count=20&luicode=10000011&lfid=100101{school_id}_-_main'.format(school_id=school_id) for school_id in school_dic.keys()]
        for url in urls:
            yield scrapy.Request(url=url, cookies=COOKIES)

    def parse(self, response):
        """Parse one API page.

        Yields
        ------
        dict
            ``{'user_ids': [...], 'school': <short name>}`` scraped from
            the raw response text.
        scrapy.Request
            The next page of the same container, built by rewriting the
            ``page`` query parameter with the value the API reports.

        Non-JSON responses (the endpoint sometimes answers with HTML
        error pages) are skipped silently, matching the original
        best-effort behaviour but without a bare ``except``.
        """
        try:
            data = json.loads(response.text)
        except ValueError:  # json.JSONDecodeError is a ValueError subclass
            return
        if not data['ok']:
            # ok == 0 signals an error page or the end of the list.
            return

        # The ids are embedded in scheme links of the form
        # "...timeline,<uid>&title..." — pull them straight from the text.
        user_ids = re.findall(r'timeline,(.*?)&title', response.text)
        # Recover which school this response belongs to from its own URL.
        school = school_dic[re.findall(r'containerid=2306570016(.*?)&', response.url)[0]]
        yield {'user_ids': user_ids, 'school': school}

        # The API tells us which page to fetch next.
        # NOTE(review): looks like `page` can be None/absent on the last
        # page — confirm against the live API; ok=0 is the only stop
        # condition handled here.
        page = data['data']['cardlistInfo']['page']
        origin_url = response.url

        if 'page' not in origin_url:
            new_url = origin_url + '&page={}'.format(page)
        else:
            new_url = re.sub(r'&page=\d+', '&page={}'.format(page), origin_url)
        yield scrapy.Request(url=new_url, callback=self.parse)