# -*- coding: UTF-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf8")
import json
import csv
import scrapy


class SinaSpider(scrapy.Spider):
    """Crawl a Sina Weibo user's timeline via the m.weibo.cn mobile JSON API.

    Each tweet is appended to two per-user outputs:
      * ``uid-<uid>.csv``  — one row per tweet (header written first)
      * ``uid-<uid>.json`` — one JSON object per line (JSON-lines)

    The user id must be supplied on the command line, e.g.
    ``scrapy crawl sina -a uid=123456`` (Scrapy stores it as ``self.uid``).
    """
    name = "sina"

    # Single mobile-API endpoint; every request below only varies the query.
    API_BASE = 'https://m.weibo.cn/api/container/getIndex'

    # CSV header row. Must stay in sync with the row layout written in
    # build_output_json(). (The original wrote the misspelled column
    # "reposts_coun"; fixed to match the data field.)
    CSV_COLUMNS = ["user_id", "content", "comment_count", "reposts_count",
                   "favorite_count", "collect_count", "status_id",
                   "images", "video", "created_at", "is_need_ocr", "is_repost"]

    def _csv_path(self):
        # One CSV file per crawled user.
        return 'uid-%s.csv' % self.uid

    def start_requests(self):
        """Kick off the crawl with the user-profile lookup for self.uid."""
        url = '%s?type=uid&value=%s' % (self.API_BASE, self.uid)
        yield scrapy.Request(url=url, callback=self.read_containerid)

    def read_containerid(self, response):
        """Locate the 'weibo' tab's containerid for the user, write the CSV
        header, and schedule the first timeline page.

        If the API reports ``ok == 0`` the uid does not exist and the crawl
        stops here.
        """
        print(response.request.headers['User-Agent'])
        res = json.loads(response.body)
        if res.get('ok') == 0:
            print('ID Not Exist')
            return

        # The timeline lives behind the tab whose tab_type is 'weibo'.
        self.containerid = None
        for tab in res.get('data', {}).get('tabsInfo', {}).get('tabs', []):
            if tab['tab_type'] == 'weibo':
                self.containerid = tab['containerid']
                break

        # Write the header once, truncating any previous run's output.
        # 'wb' because Python 2's csv module expects a binary-mode file.
        # (The original leaked this file handle; close it explicitly.)
        header_file = open(self._csv_path(), 'wb')
        try:
            csv.writer(header_file).writerow(self.CSV_COLUMNS)
        finally:
            header_file.close()

        if self.containerid:
            url = '%s?containerid=%s' % (self.API_BASE, self.containerid)
            print('url: %s' % url)
            yield scrapy.Request(url=url, callback=self.display_tweets)

    def display_tweets(self, response):
        """Store one timeline page, then follow the API's pagination cursor.

        The API's cardlistInfo carries the containerid and next page number;
        recursion stops when a page comes back with no cards.
        """
        try:
            res = json.loads(response.body)
        except Exception:
            print('parse response body error: %s' % response.body)
            return

        self.build_output_json(res)

        data = res.get('data', {})
        if data.get('cards', []):
            info = data.get('cardlistInfo', {})
            url = '%s?containerid=%s&page=%s' % (
                self.API_BASE, info.get('containerid'), info.get('page'))
            print('url: %s' % url)
            yield scrapy.Request(url=url, callback=self.display_tweets)

    def build_output_json(self, res):
        """Append every tweet card in *res* to the per-user CSV and
        JSON-lines files — one CSV row and one JSON object per tweet.

        Files are opened in append mode ('ab'/'a') so rows accumulate below
        the header written by read_containerid(). Context managers guarantee
        both handles are closed even if a card raises mid-loop (the original
        closed them only on the happy path).
        """
        cards = res.get('data', {}).get('cards', [])
        json_path = 'uid-%s.json' % self.uid

        with open(json_path, 'a') as json_out, \
                open(self._csv_path(), 'ab') as csv_out:
            csv_write = csv.writer(csv_out)

            for card in cards:
                mblog = card.get('mblog', {})

                # Tweet author's numeric user id.
                user_id = mblog.get('user', {}).get('id')
                # Tweet body (HTML fragment as returned by the API).
                content = mblog.get('text')
                comment_count = mblog.get('comments_count')
                reposts_count = mblog.get('reposts_count')
                # "attitudes" are likes on Weibo.
                favorite_count = mblog.get('attitudes_count')
                collect_count = mblog.get('pending_approval_count')
                # 'bid' uniquely identifies a tweet; used for de-duplication.
                status_id = mblog.get('bid')
                # Attached image URLs, if any.
                images = [p['url'] for p in mblog.get('pics', [])
                          if 'url' in p]
                # Video landing-page URL, if any.
                video = mblog.get('page_info', {}).get('page_url')
                created_at = mblog.get('created_at')
                # Image-only tweets (no text) need OCR downstream.
                # (Truthiness test also survives text being None, which the
                # original len() call did not.)
                is_need_ocr = not mblog.get('text')
                # Reposts carry the original tweet under 'retweeted_status'.
                is_repost = 'retweeted_status' in mblog

                csv_write.writerow([user_id, content, comment_count,
                                    reposts_count, favorite_count,
                                    collect_count, status_id, images, video,
                                    created_at, is_need_ocr, is_repost])

                json_out.write(json.dumps({
                    "user_id": user_id,
                    "content": content,
                    "comment_count": comment_count,
                    "repost_count": reposts_count,
                    "favorite_count": favorite_count,
                    "collect_count": collect_count,
                    "status_id": status_id,
                    "images": images,
                    "video": video,
                    "created_at": created_at,
                    "is_need_ocr": is_need_ocr,
                    "is_repost": is_repost
                }))
                json_out.write('\n')
