import requests, json
from lxml import etree
import pandas as pd
import time
import re
from bs4 import BeautifulSoup
from snownlp import SnowNLP
def get_html(名称):
    """Scrape cigarette name/price listings for a brand from cnxiangyan.com.

    The Chinese brand name is converted to pinyin (via SnowNLP) to build
    the brand-page URL; the listing's product names and prices are then
    scraped and printed pairwise.

    Args:
        名称: Brand name in Chinese characters, e.g. '中华'.
    """
    # SnowNLP(...).pinyin yields a list of pinyin syllables; join them
    # into the URL slug for the brand page.
    slug = "".join(str(syllable) for syllable in SnowNLP(名称).pinyin)
    url = "https://www.cnxiangyan.com/pinpai/" + slug + "/"

    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',
    }

    # FIX: the original request had no timeout, so a stalled server would
    # hang the script forever. requests.get replaces the verbose
    # requests.request("GET", ...) form; the empty payload added nothing.
    res = requests.get(url, headers=headers, timeout=10)

    bes = BeautifulSoup(res.text, "lxml")

    name_nodes = bes.select("#aaa > li> div.xy_right > div.xy_tit > a")
    jg_nodes = bes.select("#aaa > li > div.xy_right > div.show_p > span:nth-child(2)")

    # Strip whitespace artifacts from the scraped text (spaces and the
    # literal '\r\n' sequences present in the page markup).
    name_list = [n.text.replace(' ', '').replace('\r\n', '') for n in name_nodes]  # titles
    jg_list = [p.text.replace(' ', '') for p in jg_nodes]  # prices

    # FIX: the original indexed jg_list over range(len(name_list)) and
    # raised IndexError whenever the two selectors matched different
    # counts; zip stops at the shorter list. It also shadowed the builtin
    # `id` and rebound `name` (the node list) inside its own loop.
    for item_name, price in zip(name_list, jg_list):
        print(item_name, price)
      
   
if __name__ == "__main__":

   get_html('中华')



