from selenium import webdriver
import time
from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC

class DouyuSpider:
    """Scrape the Douyu "all rooms" directory with Selenium, paging through results."""

    def __init__(self, driver_path=r'E:\chromedriver_win32 存放目录\chromedriver.exe'):
        """Launch a Chrome driver and prepare crawl state.

        driver_path: location of the chromedriver executable. Generalized from a
        hard-coded constant; the default preserves the original behavior.
        """
        self.start_url = "https://www.douyu.com/directory/all"
        # NOTE(review): passing the executable path positionally is deprecated in
        # Selenium 4 (use Service(...)) — kept for compatibility with the
        # Selenium 3 API this file is written against.
        self.driver = webdriver.Chrome(driver_path)
        self.wait = WebDriverWait(self.driver, 20)
        # Number of "next page" clicks performed so far (used to cap the crawl).
        self.count = 0

    def _scroll_page(self):
        # Scroll down step by step so lazily-loaded images receive real src URLs.
        # (16 steps of 500px is tuned to this page; adjust for other layouts.)
        for _ in range(16):
            time.sleep(1)
            self.driver.execute_script("window.scrollBy(0, 500)")

    def get_content_list(self):
        """Extract one page of room data.

        Returns:
            (content_list, next_url): list of per-room dicts, and the clickable
            "next page" element — or None when there is no next page.
        """
        content_list = []
        li_list = self.driver.find_elements_by_xpath(
            '//main[@id="listAll"]/section[2]/div[2]/ul[@class="layout-Cover-list"]/li')
        for li in li_list:
            item = {}
            # This img xpath only yields a real src after the window has been
            # maximized and the page scrolled (images are lazy-loaded).
            item["room_img"] = li.find_element_by_xpath('.//img[@class="DyImg-content is-normal "]').get_attribute('src')
            item["room_title"] = li.find_element_by_xpath('.//h3').text
            item["room_cate"] = li.find_element_by_xpath('.//span[@class="DyListCover-zone"]').text
            item["anchor_name"] = li.find_element_by_xpath('.//h2[@class="DyListCover-user"]').text
            item["room_hot"] = li.find_element_by_xpath('.//span[@class="DyListCover-hot"]').text
            print(item)
            content_list.append(item)
        # find_elements (plural) returns [] on the last page instead of raising.
        next_url = self.driver.find_elements_by_xpath('//li[@class=" dy-Pagination-next"]')
        next_url = next_url[0] if next_url else None
        return content_list, next_url

    def save_content_list(self, content_list):
        # Persistence is not implemented yet.
        pass

    def run(self):
        """Main flow: load the listing, scroll, extract, then page through results."""
        # 1. start_url  2. request the page
        self.driver.get(self.start_url)
        time.sleep(2)

        # 3. Render the page: maximize the window (otherwise the image tags are
        # not created at all), then scroll so the real image URLs load.
        self.driver.maximize_window()
        self._scroll_page()

        # 4. Extract data and locate the "next page" element.
        content_list, next_url = self.get_content_list()
        # 5. Save the data.
        self.save_content_list(content_list)
        # 6. Click "next page" and repeat.
        while next_url is not None:
            next_url.click()
            time.sleep(2)  # wait for the next page to load
            self._scroll_page()
            content_list, next_url = self.get_content_list()
            self.save_content_list(content_list)

            self.count += 1
            if self.count > 2:  # cap the crawl depth while developing
                break
        # BUG FIX: original read `self.driver,quit()` — a tuple expression that
        # invoked the builtin quit() (raising SystemExit) and never closed Chrome.
        self.driver.quit()

if __name__ == '__main__':
    # Entry point: build the spider and start the crawl.
    spider = DouyuSpider()
    spider.run()

"""
注意1：提取数据，需要：
        img对应的xpath必须是
        页面最大化：否则图片标签没有加载出来
        拉动滚动条：否则真正图片地址没有加载出来
      
"""