#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from selenium import webdriver
import time, csv


class SpiderDouYuApp():
    """Scrape live-room listings from douyu.com page by page and append them to a CSV file."""

    def __init__(self):
        # Landing page listing all live rooms.
        self.url = 'https://www.douyu.com/directory/all'
        # NOTE(review): Selenium 3 style API is used throughout
        # (find_elements_by_xpath); a Selenium 4 upgrade would need By.XPATH.
        self.driver = webdriver.Chrome()

    def getZBContent(self):
        """Scrape the currently loaded page of live rooms.

        Returns:
            tuple: ``(content_list, next_btn)`` where ``content_list`` is a
            list of dicts (one per room: img/title/type/author/hot) and
            ``next_btn`` is the "next page" element, or ``None`` on the
            last page.
        """
        # 1. Wait for the JS-rendered room list to load.
        time.sleep(10)

        # 2. Scroll down in small steps so lazy-loaded room cards render.
        for step in range(2, 90):
            js = "document.documentElement.scrollTop={}".format(step * 100)
            self.driver.execute_script(js)

        # Let the last batch of cards finish rendering.
        time.sleep(5)

        # 3. Each <li> under the cover list is one live-room card.
        content_list = []
        li_list = self.driver.find_elements_by_xpath('//ul[@class="layout-Cover-list"]/li')
        for li in li_list:
            item = {
                # Room preview image (note: the class attribute really has a
                # trailing space on the site, keep it).
                'room_img': li.find_element_by_xpath(
                    './/img[@class="DyImg-content is-normal "]').get_attribute('src'),
                # Room title.
                'room_title': li.find_element_by_xpath('.//h3[@class="DyListCover-intro"]').text,
                # Room category / zone.
                'room_type': li.find_element_by_xpath('.//span[@class="DyListCover-zone"]').text,
                # Streamer name.
                'room_author': li.find_element_by_xpath('.//h2[@class="DyListCover-user"]').text,
                # Popularity / heat counter.
                'room_hot': li.find_element_by_xpath('.//span[@class="DyListCover-hot"]').text,
            }
            content_list.append(item)

        # 4. Look for the "next page" button; an empty result means last page.
        next_btns = self.driver.find_elements_by_xpath('//li[@class=" dy-Pagination-next"]')
        next_btn = next_btns[0] if next_btns else None

        return content_list, next_btn

    def save(self, content_list):
        """Append the scraped rooms (list of dicts) to the CSV file.

        Fixes over the original:
        - the header row is written only when the file is empty, instead of
          being duplicated on every page;
        - the file is opened with ``newline=''`` as the csv module requires,
          and via a context manager so it is always closed.
        """
        if not content_list:
            return  # nothing to write; also avoids content_list[0] IndexError
        with open('/Users/lpf/Desktop/安康学院pyhton实训/python实训/第八天/斗鱼直播房间信息爬虫.csv',
                  mode='a', encoding='utf-8', newline='') as file:
            csv_file = csv.writer(file)
            # In append mode tell() is the end-of-file offset; 0 == empty file,
            # so the header goes in exactly once.
            if file.tell() == 0:
                csv_file.writerow(content_list[0].keys())
            for item in content_list:
                csv_file.writerow(item.values())

    def run(self):
        """Entry point: crawl every page and persist each page's rooms."""
        # Maximize the window so more cards render per scroll step.
        self.driver.maximize_window()
        try:
            self.driver.get(url=self.url)
            # First page.
            content_list, next_btn = self.getZBContent()
            self.save(content_list)
            print(next_btn)
            # Follow "next page" until it disappears.
            index = 1
            while next_btn is not None:
                next_btn.click()
                content_list, next_btn = self.getZBContent()
                self.save(content_list)
                index += 1
                print(index)
            print('采集结束')
        finally:
            # Always release the browser, even if scraping raised midway
            # (the original leaked the Chrome process on errors).
            self.driver.quit()


if __name__ == '__main__':
    # Guard the entry point so importing this module does not launch Chrome
    # and start crawling as a side effect.
    app = SpiderDouYuApp()
    app.run()






