from selenium import webdriver
import time
from lxml import etree

class DouyuSpider:
    """Crawl the Douyu "all rooms" directory (https://www.douyu.com/directory/all)
    with Selenium, scraping room info from each page and following the
    "next page" button for a capped number of pages."""

    def __init__(self):
        self.start_url = "https://www.douyu.com/directory/all"
        # NOTE(review): passing the chromedriver path positionally is deprecated
        # in Selenium 4 (use a Service object); kept as-is for compatibility
        # with the Selenium version this project pins.
        self.driver = webdriver.Chrome(r'E:\chromedriver_win32 存放目录\chromedriver.exe')
        # Number of "next page" clicks performed so far (used to cap the crawl).
        self.count = 0

    def _scroll_page(self, steps=16, pixels=500, pause=1):
        """Scroll the window down step by step so lazy-loaded room cards render.

        The step count/pixel distance are tuned to this page's height; adjust
        per page layout if the site changes.
        """
        for _ in range(steps):
            time.sleep(pause)
            self.driver.execute_script(f"window.scrollBy(0, {pixels})")

    def get_content_list(self, html):
        """Extract room dicts from a parsed page and locate the next-page button.

        html: lxml element tree built from the rendered page source.
        Returns (content_list, next_url) where next_url is the clickable
        "next page" WebElement, or None when on the last page.
        """
        content_list = []
        li_list = html.xpath(
            '//main[@id="listAll"]/section[2]/div[2]/ul[@class="layout-Cover-list"]/li')
        for li in li_list:
            item = {}
            # The only reliable xpath for the cover image; it requires the window
            # to be maximized and the page scrolled so lazy images have loaded.
            item["room_img"] = li.xpath('.//div[@class="LazyLoad is-visible DyImg DyListCover-pic"]/img/@src')
            item["room_title"] = li.xpath('.//h3/text()')
            item["room_cate"] = li.xpath('.//span[@class="DyListCover-zone"]/text()')
            item["anchor_name"] = li.xpath('.//h2[@class="DyListCover-user"]/text()')
            item["room_hot"] = li.xpath('.//span[@class="DyListCover-hot"]/text()')
            print(item)
            content_list.append(item)
        # Locate the "next page" button as a live WebElement so it can be
        # clicked. NOTE(review): find_elements_by_xpath was removed in
        # Selenium 4 — switch to driver.find_elements(By.XPATH, ...) on upgrade.
        next_url = self.driver.find_elements_by_xpath('//li[@class=" dy-Pagination-next"]')
        next_url = next_url[0] if next_url else None
        return content_list, next_url

    def save_content_list(self, content_list):
        """Persist the scraped items. Not implemented yet."""
        pass

    def run(self):
        """Main crawl loop: load the directory, scrape it, follow 'next' pages."""
        try:
            self.driver.get(self.start_url)
            time.sleep(2)

            # Maximize so the lazy-load image xpath matches (see get_content_list).
            self.driver.maximize_window()
            self._scroll_page()

            # Extract data and the next-page element from the rendered source.
            html = etree.HTML(self.driver.page_source)
            content_list, next_url = self.get_content_list(html)
            self.save_content_list(content_list)

            # Click through subsequent pages until none remain or the cap hits.
            while next_url is not None:
                next_url.click()
                time.sleep(2)  # wait for the next page to load
                self._scroll_page()

                html = etree.HTML(self.driver.page_source)
                content_list, next_url = self.get_content_list(html)
                self.save_content_list(content_list)

                self.count += 1
                if self.count > 2:  # crawl at most 3 pages beyond the first
                    break
        finally:
            # BUG FIX: the original `self.driver, quit()` built a throwaway
            # tuple and invoked the *builtin* quit() (raising SystemExit) while
            # never closing the browser. Close it here, even on error.
            self.driver.quit()


if __name__ == '__main__':
    # Entry point: build the spider and kick off the crawl.
    spider = DouyuSpider()
    spider.run()

"""
注意1：room_img的提取方式有多种，我们不必直接定位到img标签
"""