# coding=utf-8

import requests
import re
from bs4 import BeautifulSoup as soup
import json
import time
from item import Item
import db
from lxml import etree

import sys
reload(sys)
sys.setdefaultencoding("utf-8")

def getHtml(url, timeout=10):
    """Fetch *url* and return the response body as text.

    A desktop browser User-Agent is sent because these sites reject the
    default ``requests`` UA.  ``timeout`` (seconds) is new: without it a
    stalled server would hang the crawler forever.  The default keeps all
    existing call sites working unchanged.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}
    rep = requests.get(url, headers=headers, timeout=timeout)
    return rep.text


class douyuParser(object):
    """Crawler for Douyu's "all rooms" directory.

    ``parse()`` is a generator yielding one ``Item`` per live room;
    ``getpagination()`` asks the site how many directory pages exist.
    """
    # isAjax=1 makes the server return just the room-list fragment.
    url = 'https://www.douyu.com/directory/all?page={}&isAjax=1'

    def __init__(self, url=None):
        # ``url`` is accepted only for signature parity with the other
        # parsers; the class attribute above is the real URL template.
        self.platform = 'douyu'

    def parse(self):
        """Yield an ``Item`` for every live room across all pages."""
        print(u'开始抓取斗鱼直播数据...')
        page_count = self.getpagination()

        for page_idx in range(1, page_count + 1):
            html = getHtml(self.url.format(page_idx))
            sys.stdout.write(u'\r已爬取{}页'.format(page_idx))

            root = etree.HTML(html)
            for node in root.xpath('//li'):
                rid = node.xpath('a/@data-rid')
                if not rid:
                    # Skip <li> nodes that are not room entries (the old
                    # unconditional [0] index raised IndexError here).
                    continue
                roomid = rid[0]
                title = node.xpath('a/@title')[0]
                dyname = node.xpath('descendant::span[@class="dy-name ellipsis fl"]/text()')[0]
                dynum = node.xpath('descendant::span[@class="dy-num fr"]/text()')[0]
                # Key items as "<platform>/<room id>" so ids stay unique
                # across the different sites stored in the same table.
                item_id = self.platform + '/' + roomid
                yield Item(item_id, title, dyname, dynum)

            time.sleep(0.2)  # be polite: throttle between page fetches
        print(u'爬取结束')

    def getpagination(self):
        """Return the total number of directory pages, or 0 if not found."""
        html = getHtml('https://www.douyu.com/directory/all')
        # Raw string: '\d' is an invalid escape sequence in a plain string
        # literal on Python 3 and only works by accident on Python 2.
        pattern = re.compile(r'count: "(\d*?)"')
        match = pattern.search(html)
        if match:
            return int(match.group(1))
        return 0

class zhanqiParser(object):
    """Crawler for Zhanqi TV's paged JSON room-list API."""
    # 30 rooms per page; {} is filled with the 1-based page number.
    url = 'https://www.zhanqi.tv/api/static/v2.1/live/list/30/{}.json'

    def __init__(self, url=None):
        # ``url`` kept for signature parity with the other parsers; unused.
        self.platform = 'zhanqi'

    def parse(self):
        """Yield an ``Item`` per live room; stop at the first empty page."""
        print(u'开始抓取战旗直播数据...')
        page_idx = 1
        while True:
            payload = json.loads(getHtml(self.url.format(page_idx)))
            rooms = payload['data']['rooms']
            if not rooms:
                # An empty room list marks the end of the listing.
                print(u'爬取结束')
                break

            for room in rooms:
                # str(): guard against the API returning a numeric id,
                # which would make the concatenation below raise TypeError.
                roomid = str(room['id'])
                title = room['title']
                dyname = room['nickname']
                dynum = room['online']
                item_id = self.platform + '/' + roomid
                yield Item(item_id, title, dyname, dynum)

            sys.stdout.write(u'\r已爬取{}页 '.format(page_idx))
            page_idx += 1
            time.sleep(0.2)  # throttle between requests


class xiongmaoParser(object):
    """Crawler for Panda TV's paged JSON room-list API."""
    # status=2 = live rooms, ordered by viewer count, 120 per page.
    url = 'http://www.panda.tv/live_lists?status=2&order=person_num&pageno={}&pagenum=120'

    def __init__(self, url=None):
        # ``url`` kept for signature parity with the other parsers; unused.
        self.platform = 'xiongmao'

    def parse(self):
        """Yield an ``Item`` per live room; stop at the first empty page."""
        print(u'开始抓取熊猫直播数据...')
        page_idx = 1
        while True:
            payload = json.loads(getHtml(self.url.format(page_idx)))
            rooms = payload['data']['items']
            if not rooms:
                # An empty item list marks the end of the listing.
                print(u'爬取结束')
                break

            for room in rooms:
                # str(): guard against a numeric id, which would make the
                # string concatenation below raise TypeError.
                roomid = str(room['id'])
                title = room['name']
                dyname = room['userinfo']['nickName']
                dynum = room['person_num']
                item_id = self.platform + '/' + roomid
                yield Item(item_id, title, dyname, dynum)

            sys.stdout.write(u'\r已爬取{}页 '.format(page_idx))
            page_idx += 1
            time.sleep(0.2)  # throttle between requests

class Container(list):
    """A plain ``list`` subclass used to accumulate crawled items."""
    
def test():
    """Run a full Douyu crawl and persist the results via the db module."""
    items = Container()
    parser = douyuParser()
    for entry in parser.parse():
        items.append(entry)
    db.db.insert_or_update(items)
    
# Script entry point: run a full Douyu crawl when executed directly.
if __name__=='__main__':
    test()