import json
from urllib.parse import parse_qs, urlsplit

import scrapy

from ..items import WeiboItem

class Spider0Spider(scrapy.Spider):
    """Crawl the Weibo AJAX timeline endpoint for a fixed set of user ids.

    Each start URL returns one JSON page of a user's posts. ``parse``
    yields one ``WeiboItem`` per post and follows pagination (carrying
    ``since_id`` forward) until the endpoint returns an empty list.
    """

    name = "spider0"
    allowed_domains = ["weibo.com"]
    start_urls = [
        "https://weibo.com/ajax/statuses/mymblog?uid=6632842090&page=1&feature=0",
        "https://weibo.com/ajax/statuses/mymblog?uid=5836306077&page=1&feature=0",
    ]

    def start_requests(self):
        # One request per seed URL; all extraction happens in ``parse``.
        for url in self.start_urls:
            yield scrapy.Request(url)

    def parse(self, response):
        """Extract posts from one timeline page and schedule the next page.

        Yields ``WeiboItem`` instances and, when the current page is
        non-empty, a ``scrapy.Request`` for the following page.
        """
        try:
            payload = json.loads(response.text)
            since_id = payload["data"]["since_id"]
            posts = payload["data"]["list"]
        except (json.JSONDecodeError, KeyError, TypeError):
            # Malformed or unexpected response (e.g. rate-limited or a
            # login wall) — log and abandon this branch instead of
            # silently swallowing every exception with a bare ``except``.
            self.logger.warning("unexpected response body from %s", response.url)
            return

        # Parse query parameters by name instead of positional splitting,
        # so a reordered query string cannot break extraction.
        query = parse_qs(urlsplit(response.url).query)
        try:
            user_id = query["uid"][0]
            page_num = int(query["page"][0])
        except (KeyError, IndexError, ValueError):
            self.logger.warning("cannot parse uid/page from %s", response.url)
            return

        for post in posts:
            # Create a fresh item per post: reusing one mutable item
            # across ``yield``s lets later iterations clobber earlier
            # ones while they sit in the (async) pipeline.
            weibo_item = WeiboItem()
            weibo_item['publish_time'] = post['created_at']
            weibo_item['name'] = post['user']['screen_name']
            weibo_item['text_raw'] = post['text_raw']
            weibo_item['user_id'] = user_id
            yield weibo_item

        if posts:
            # An empty list marks the last page; otherwise follow the next
            # one. ``since_id`` may arrive as an int, so coerce with str().
            next_page_url = (
                "https://weibo.com/ajax/statuses/mymblog?uid=" + user_id
                + "&page=" + str(page_num + 1)
                + "&feature=0&since_id=" + str(since_id)
            )
            yield scrapy.Request(next_page_url)