import requests
import time
from bs4 import BeautifulSoup
# HTTP request headers: present a desktop Chrome user-agent so the site
# serves its normal page.  (NOTE(review): the name "hearders" is a typo,
# kept as-is because the scraper function below references it.)
hearders = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
}
# Listing-page URL template; {} is filled with the 1-based page number.
source_list = 'http://www.tyqyyw.com/qingyinyue/page/{}/'
# Accumulates every article URL scraped from the listing pages.
context_list = []
# Tianyuan light-music site - light-music channel
num_list = [4, 5, 6]
# Demo tuple of mixed types (only printed by the __main__ block).
yuanzu1 = (0, '1', '2', num_list)
# Function: read the data of one listing page.
def read_per_page(source, page=None):
    """Scrape one listing page and append every article URL to ``context_list``.

    Args:
        source: Full URL of the listing page to fetch.
        page: Page number, used only in the progress print-out.  Defaults to
            the module-global ``i`` set by the caller's loop — kept so the
            original call site ``read_per_page(url)`` behaves unchanged.
    """
    if page is None:
        # The original printed the caller's loop variable ``i`` through the
        # global namespace; preserve that behaviour when no page is given.
        page = globals().get('i', '?')
    time.sleep(1)  # throttle to ~1 request/second to be polite to the server
    # timeout added so a stalled server cannot hang the whole crawl forever
    res = requests.get(source, headers=hearders, timeout=10)
    soup = BeautifulSoup(res.text, 'html.parser')

    # Method 1: every <a class="zoom"> on the listing page wraps an article link.
    for num, anchor in enumerate(soup.find_all('a', attrs={"class": "zoom"}), start=1):
        href = anchor.get('href')  # article URL from the anchor's href attribute
        context_list.append(href)
        print("page:{},NO.{},context:{}\r\n".format(page, num, href))

    # Method 2 (alternative selector, unused): collect the title attribute
    # instead of the href.  Kept for reference:
    # for anchor in soup.select('#post_container > li > div.article > h2 > a'):
    #     context_list.append(anchor.get('title'))
if __name__ == '__main__':
    # Demo walk over the tuple.  The original ``while yuanzu1[m] != None``
    # never terminated normally: the tuple contains no None, so indexing ran
    # past the end and raised IndexError before any scraping started.  Bound
    # the index explicitly and use ``is not None`` for the None test.
    m = 0
    print(m, ":", yuanzu1[m], "\r\n")
    while m < len(yuanzu1) and yuanzu1[m] is not None:
        print(m, ":", yuanzu1[m], "\r\n")
        m += 1

    # Crawl every listing page of the channel (pages 1..294, as hard-coded
    # in the original; adjust the upper bound if the site grows).
    for i in range(1, 295):
        read_per_page(source_list.format(i))