import json
import os
from urllib.parse import urljoin

from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import  time

# Launch Chrome and open the target Kugou Fanxing category page.
chrome = webdriver.Chrome()
chrome.get('https://fanxing.kugou.com/pcindex/category/8000?action=spreadIndex&id=3')
chrome.maximize_window()

# Final de-duplicated records are collected here.
unique_data = []

# Scroll down in increasing steps to trigger the page's lazy loading,
# pausing after each scroll so the dynamically loaded items can render.
for scroll_offset in (2000, 4000, 6000, 8000):
    chrome.execute_script(f"window.scrollBy(0, {scroll_offset});")
    time.sleep(2)  # wait for the newly loaded content

# Wait (up to 20 s) until the category list container is visible before scraping.
WebDriverWait(chrome, 20).until(
    EC.visibility_of_element_located((By.XPATH, '//div[@class="fx-category-list-items"]'))
)

"""
# 将每一项文本和播放量组合成一个字典或元组
# 获取刷新后的文字（每个元素是WebElement对象，需要提取.text）
texts = [text_element.text.strip() for text_element in
         chrome.find_elements(By.XPATH , '//div[@class="special_list"]/ul[2]/li/div[1]')]

# 获取刷新后的播放量（同样提取文本）
play_counts = [play_count.text.strip() for play_count in
               chrome.find_elements(By.XPATH , '//div[@class="special_list"]/ul[2]/li/div[3]')]


for desc , people in zip(texts , play_counts) :
    # 可以将其作为字典或者元组存储
    combined = {
        'desc' : ' '.join(desc.strip().split()) ,
        'people' : ' '.join(people.strip().split())
    }
    print(combined)
    # 将数据添加到列表中
    unique_data.append(combined)

"""
texts = [text_element.text.strip() for text_element in
         chrome.find_elements(By.XPATH , '//div[@class="fx-category-list-items"]/div/div/div[2]/a[@title]')]
for text in texts :
    combined = {
        'text' : ' '.join(text.strip().split())
    }
    unique_data.append(combined)


# De-duplicate the scraped records: two dicts with identical key/value
# pairs collapse to one. A frozenset of a dict's items is hashable and
# order-insensitive, so it serves as the dedup key; dict insertion order
# preserves the first occurrence of each record.
unique_data = list({frozenset(item.items()): item for item in unique_data}.values())

# 导出到 JSON 文件
# Serialize the de-duplicated records to text.json (UTF-8, pretty-printed).
with open('text.json', 'w', encoding='utf-8') as out_file:
    out_file.write(json.dumps(unique_data, ensure_ascii=False, indent=4))

