# -*- coding: utf-8 -*-
import scrapy
import json
from taobo_live.items import TaoboLiveItem
import hashlib
import time


from taobo_live.redisfilter import BloomFilter
class TbliveSpider(scrapy.Spider):
    """Crawl Taobao Live hot-room listings and per-host fan counts.

    Flow: start_requests -> parse_hot (parses the hot-sorted room list and
    re-queues itself with dont_filter=True so the list is polled
    continuously) -> parse_fans (fills fansNum/roomNum on the item carried
    in request meta and yields it).

    A Redis-backed BloomFilter deduplicates hosts by accountId so each
    broadcaster's detail page is fetched at most once.
    """
    name = 'tblive'
    custom_settings = {
            'DEFAULT_REQUEST_HEADERS' : {
                'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
        }
    }

    def start_requests(self):
        """Seed the crawl with the hot-sorted live-room listing."""
        headers = {
            'referer': 'https://taobaolive.taobao.com/room/index.htm'
        }
        base_url ='https://taobaolive.taobao.com/api/get_videos/1.0?sortType=hot'

        yield scrapy.Request(base_url, headers=headers, callback=self.parse_hot)

    def parse_hot(self, response):
        """Parse the hot-room listing, dedupe hosts, and request detail pages.

        Yields one re-poll request for the listing itself, plus one
        live-detail request per not-yet-seen broadcaster (the partially
        filled item rides along in request meta for parse_fans).
        """
        try:
            listing = json.loads(response.text)
            result_list = listing.get('result').get('model').get('dataList')
        except (ValueError, AttributeError) as e:
            # Malformed or unexpected payload: report it instead of the old
            # silent `except: pass`, which hid every parsing bug.
            self.logger.warning('parse_hot: bad listing payload: %s', e)
            return

        header = {
            'referer': 'https://taobaolive.taobao.com/room/index.htm'
        }
        second_url = 'https://taobaolive.taobao.com/api/get_videos/1.0?sortType=hot'
        # dont_filter bypasses Scrapy's duplicate filter so the hot list
        # keeps being re-fetched.
        yield scrapy.Request(second_url, headers=header, callback=self.parse_hot, dont_filter=True)

        # Hoisted out of the loop: one filter handle per response, not one
        # (and one Redis connection) per row.
        bf = BloomFilter()

        for result in result_list:
            try:
                useritem = TaoboLiveItem()
                account_do = result.get('accountDO') or {}  # may be null in the feed
                accountID = str(result.get('accountId'))
                useritem["accountId"] = result.get('accountId')
                useritem["accountNick"] = account_do.get('accountNick')
                # Protocol-relative avatars ("//img...") need an explicit scheme.
                # startswith (vs the old `'http' in ...`) is the correct test and
                # the `or ''` guard avoids a TypeError on a missing headImg.
                head_img = account_do.get('headImg') or ''
                useritem["headImg"] = head_img if head_img.startswith('http') else 'http:' + head_img
                useritem["title"] = result.get('title')
                useritem["topic"] = result.get('topic')  # key into the detail page
                useritem['account_url'] = 'https://tblive.m.taobao.com/wow/tblive/act/host-detail?&broadcasterId={u}&suid={s}'.format(u=useritem["accountId"], s=useritem["topic"])

                if bf.isContains(accountID):  # broadcaster already crawled
                    self.logger.debug('account %s already seen, skipping', accountID)
                    continue
                bf.insert(accountID)

                headers = {
                    'referer':'https://taobaolive.taobao.com/room/index.htm?spm=a21tn.8216370.2278281.1.2a145722OIS3Be&feedId={}'.format(useritem["topic"])
                }
                next_url = 'https://taobaolive.taobao.com/api/live_detail/1.0?creatorId=&liveId={}'.format(useritem["topic"])

                yield scrapy.Request(next_url, headers=headers, callback=self.parse_fans, meta={'useritem': useritem})
            except Exception:
                # One malformed row must not abort the rest of the listing
                # (previously a single bad row silently dropped the batch).
                self.logger.exception('parse_hot: failed to process row %r', result)

    def parse_fans(self, response):
        """Fill fansNum/roomNum from the live-detail API and yield the item."""
        try:
            fans_json = json.loads(response.text)
            useritem = response.meta['useritem']
            # Null-safe drill-down: either level may be missing in the payload.
            broadcaster = (fans_json.get('result') or {}).get('broadCaster') or {}
            useritem["fansNum"] = broadcaster.get('fansNum', 0)
            useritem["roomNum"] = broadcaster.get('roomNum', 0)
            yield useritem
        except (ValueError, KeyError) as e:
            self.logger.warning('parse_fans: bad detail payload: %s', e)


