import requests
import xlwt
import re
import os


def crawl(url='https://www.huya.com/g/100141',
          excel_path=r'D:\crawl.xls',
          pic_dir='D:/huya_pic/'):
    """Scrape a Huya live-stream category page and persist the results.

    Fetches the category page, extracts each live room's link, streamer
    nickname, room title and cover-image URL, writes them to an Excel
    workbook, then downloads every cover image.

    Args:
        url: Category page to scrape (defaults to the original hard-coded one).
        excel_path: Destination workbook path. NOTE: xlwt can only emit the
            legacy BIFF ``.xls`` format — the original saved to ``.xlsx``,
            producing a mislabeled file that Excel refuses to open.
        pic_dir: Directory for downloaded cover images (created if absent).
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Referer': 'https://www.huya.com/',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'en-US,en;q=0.8'
    }

    # timeout prevents an unresponsive server from hanging the script forever;
    # raise_for_status surfaces HTTP errors instead of parsing an error page.
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()

    items = _parse_rooms(response.text)
    _write_excel(items, excel_path)
    _download_pics(items, pic_dir)
    print('一共爬取了%d条数据' % len(items))


def _parse_rooms(html):
    """Return a list of room dicts parsed from the category page HTML.

    Rooms whose cover URL is not an https link (lazy-load placeholders)
    are dropped, matching the original filter.
    """
    pics = re.findall(r'<img class="pic".*?data-original="(.*?)" src.*?>', html)
    hrefs = re.findall(r'<a href="(.*?)" class="title"', html)
    titles = re.findall(r'<a.*?class="title".*?title="(.*?)"', html)
    names = re.findall(r'<i class="nick".*?>(.*?)</i>', html)
    # zip() stops at the shortest list, so four regexes with mismatched
    # match counts can no longer raise IndexError as the old indexed loop did.
    return [
        {'href': href, 'name': name, 'title': title, 'pic': pic}
        for pic, href, title, name in zip(pics, hrefs, titles, names)
        if pic.startswith('https://')
    ]


def _write_excel(items, path):
    """Write the room records to an .xls workbook at *path*."""
    book = xlwt.Workbook(encoding='utf-8')
    sheet = book.add_sheet('crawl', cell_overwrite_ok=True)
    # (The original built an xlwt.Font/XFStyle but never applied it to any
    # cell; that dead code is dropped here.)
    for col, header in enumerate(('直播间链接', '主播昵称', '直播间标题', '图片链接')):
        sheet.write(0, col, header)
    for row, item in enumerate(items, start=1):
        sheet.write(row, 0, item['href'])
        sheet.write(row, 1, item['name'])
        sheet.write(row, 2, item['title'])
        sheet.write(row, 3, item['pic'])
    book.save(path)


def _download_pics(items, pic_dir):
    """Download each room's cover image into *pic_dir* as <nickname>.jpg."""
    # makedirs + exist_ok handles both a missing parent and a pre-existing dir.
    os.makedirs(pic_dir, exist_ok=True)
    for item in items:
        print(item)
        # Nicknames may contain characters that are illegal in Windows file
        # names (e.g. / \ : * ? " < > |); replace them so open() cannot fail.
        safe_name = re.sub(r'[\\/:*?"<>|]', '_', item['name'])
        path = os.path.join(pic_dir, safe_name + '.jpg')
        content = requests.get(item['pic'], timeout=10).content
        with open(path, 'wb') as f:
            f.write(content)


# Entry point: run the scraper only when this file is executed as a script,
# not when it is imported as a module.
if __name__ == '__main__':
    crawl()
