# 224
# 直播间爬虫
import pandas as pd
import requests
import time
import re
import os


def crawl(urls, path_excel):
    """Scrape Huya live-room listings and save them.

    For each category page in *urls*, extract every room's link, streamer
    nickname, title and cover-image URL; write one Excel sheet per category
    into *path_excel*, and download each cover image to
    D:/huya_pic/<category>/<nickname>.jpg.

    Parameters
    ----------
    urls : list[str]
        Huya category-page URLs, e.g. 'https://www.huya.com/g/lol'.
        The last path segment is used as the sheet / folder name.
    path_excel : str
        Destination path of the Excel workbook.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Referer': 'https://www.huya.com/',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'en-US,en;q=0.8'
    }
    # Compile the extraction patterns once, outside the URL loop.
    pic_re = re.compile(r'<img class="pic".*?data-original="(.*?)" src.*?>')
    href_re = re.compile(r'<a href="(.*?)" class="title"')
    title_re = re.compile(r'<a.*?class="title".*?title="(.*?)"')
    name_re = re.compile(r'<i class="nick".*?>(.*?)</i>')
    # Context manager replaces the per-iteration writer.save(), which was
    # deprecated and removed in pandas 2.0; the workbook is written on exit.
    with pd.ExcelWriter(path_excel) as writer:
        for url in urls:
            response = requests.get(url, headers=headers, timeout=30)
            response.raise_for_status()  # fail loudly on HTTP errors
            html = response.text
            pics = pic_re.findall(html)
            hrefs = href_re.findall(html)
            titles = title_re.findall(html)
            names = name_re.findall(html)
            # Keep only entries whose cover image is a real https URL
            # (lazy-load placeholders are filtered out, as before).
            items = [
                {'href': h, 'name': n, 'title': t, 'pic': p}
                for h, n, t, p in zip(hrefs, names, titles, pics)
                if p.startswith('https://')
            ]
            # BUG FIX: build the sheet from the filtered items so every
            # column has the same length — the raw regex lists can differ,
            # which made the original DataFrame constructor raise.
            df = pd.DataFrame({
                '直播间链接': [it['href'] for it in items],
                '昵称': [it['name'] for it in items],
                '标题': [it['title'] for it in items],
                '图片链接': [it['pic'] for it in items],
            })
            sheet = url.rsplit('/', 1)[1]
            df.to_excel(writer, sheet_name=sheet, index=False)
            print('=====表单已保存=====')

            # Download the cover images.
            pic_dir = os.path.join('D:/huya_pic', sheet)
            os.makedirs(pic_dir, exist_ok=True)
            for item in items:
                print(item)
                # Nicknames may contain characters Windows forbids in
                # filenames — replace them so open() cannot fail.
                safe_name = re.sub(r'[\\/:*?"<>|]', '_', item['name'])
                path_pic = os.path.join(pic_dir, safe_name + '.jpg')
                content = requests.get(item['pic'], timeout=30).content
                with open(path_pic, 'wb') as f:
                    f.write(content)
            print('\033[0;37;44m共计%d条数据\033[0m' % len(pics), '\n')


def play():
    """Run crawl() roughly 1 second after launch, then once every 24 hours.

    Scrapes three Huya category pages (LoL, 100141, game id 7) into
    D:\\crawl.xlsx each run.
    """
    print('当前时间:', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()), '等待爬取...')
    urls = ['https://www.huya.com/g/lol', 'https://www.huya.com/g/100141', 'https://www.huya.com/g/7']
    # BUG FIX: the original compared the wall clock against a fixed
    # hour/minute/second for exact equality inside a sleep(1) loop, so any
    # drift (crawl() itself takes well over a second) skipped the tick and
    # silently delayed the next run by a day; it also wrapped the seconds
    # with % 60 without carrying into the minute, so a start at :59
    # scheduled a target already in the past. Track an absolute timestamp
    # and compare with >= instead.
    next_run = time.time() + 1  # first run ~1 second after launch
    while True:
        if time.time() >= next_run:
            print('\033[0;37;44m开始爬取中...\033[0m\n')
            time.sleep(2)
            crawl(urls, r'D:\crawl.xlsx')
            next_run += 24 * 60 * 60  # reschedule for the same time tomorrow
        time.sleep(1)


# Entry point: start the scheduler loop (blocks forever).
if __name__ == '__main__':
    play()