from bs4 import BeautifulSoup
import pandas as pd
import requests
import csv,time
import codecs
import io
import sys
# Re-wrap stdout with the GB18030 encoding so printed scrape progress does not
# raise UnicodeEncodeError — presumably targets a Chinese Windows console; TODO confirm.
sys.stdout=io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030')

# Append every string in `data` as one line to <name>.csv.
def cun(data,name):
	"""Append each string in `data` as its own line to <name>.csv.

	The file is created if it does not exist and is always closed, even
	when a write fails (the original leaked the handle on error).
	"""
	with open(name+'.csv',"a") as f:
		for s in data:
			f.write(s+"\n")
# Append a single string as one line to <name>.csv.
def cun_cou(data,name):
	"""Append the single string `data` as one line to <name>.csv.

	Uses a context manager so the handle is closed even if the write
	raises (the original leaked the handle on error).
	"""
	with open(name+'.csv',"a") as f:
		f.write(data+"\n")
# Append rows of scraped data to <name>.csv via pandas.
def cun_web(urls,name):
	"""Append the rows in `urls` to <name>.csv (utf-8-sig, no header or index)."""
	frame = pd.DataFrame(urls)
	target = name + ".csv"
	frame.to_csv(target, mode='a', encoding='utf_8_sig', header=False, index=False)
def get_WebLinkcsv(name):
	"""Return the first-column values of <name>.csv as a list.

	The first row of the file is treated as a header (pandas default),
	matching the original behavior.  Replaces the index-loop/parameter-
	shadowing of the original with a direct column extraction.
	"""
	frame = pd.read_csv(name + ".csv")
	# Same values as iterating rows and taking .values[0] from each.
	return frame.iloc[:, 0].tolist()

def parse_HTML(url):
	"""Fetch one girl-name index page and return the detail-page URLs on it.

	Scans every <li class="first"> for an <a class="sex-f"> link and
	prefixes the site root.  List items without such a link are collected
	and printed for inspection.  Returns [] when the request fails
	(the original fell through and crashed on an undefined `req`).
	"""
	data=[]
	error=[]
	try:
		req = requests.get(url,timeout=10,stream=True)
	except requests.RequestException:
		print("cuo:",url)
		return data  # bug fix: bail out instead of using undefined `req`

	soup = BeautifulSoup(req.text, 'lxml')
	for li in soup.find_all('li',class_="first"):
		try:
			href=li.find("a",class_="sex-f")['href']
			print(href)
			data.append('https://www.babynamewizard.com'+href)
		except (TypeError, KeyError):
			# find() returned None or the anchor has no href — keep for debugging
			error.append(li)
	print(error)
	return data
def parse_HTML1(url):
	"""Fetch one boy-name index page and return the detail-page URLs on it.

	Same as parse_HTML but matches <a class="sex-m"> anchors.  Returns []
	when the request fails (the original fell through and crashed on an
	undefined `req`).
	"""
	data=[]
	error=[]
	try:
		req = requests.get(url,timeout=10,stream=True)
	except requests.RequestException:
		print("cuo:",url)
		return data  # bug fix: bail out instead of using undefined `req`

	soup = BeautifulSoup(req.text, 'lxml')
	for li in soup.find_all('li',class_="first"):
		try:
			href=li.find("a",class_="sex-m")['href']
			print(href)
			data.append('https://www.babynamewizard.com'+href)
		except (TypeError, KeyError):
			# find() returned None or the anchor has no href — keep for debugging
			error.append(li)
	print(error)
	return data


# Fetch the detail fields (name, origin, wiki text, user comments) for one name URL.
def parse_content(url):
	"""Scrape one name-detail page.

	Returns a list holding a single row
	[url, name, origin, wiki_text, user_comment, ...] on success, or []
	when the page cannot be fetched or parsed.  Failing URLs are appended
	to cuo2.csv (request failed) / cuo3.csv (parse failed) for retry.
	"""
	data=[]
	try:
		headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
		# bug fix: headers were built but never sent with the request
		req = requests.get(url,headers=headers,timeout=10,stream=True)
	except requests.RequestException:
		cun_cou(url,"cuo2")
		return data  # bug fix: bail out instead of using undefined `req`

	# explicit parser, consistent with the other parse functions in this file
	soup = BeautifulSoup(req.text, 'lxml')
	row=[url]
	try:
		name=soup.find("div",class_="title clear-block").find("span",class_="f").get_text().replace(",","").replace("\n","")
		row.append(name)
		try:
			p=soup.find("div",class_="origin").find('p').get_text().replace(",","").replace("\n","")
		except AttributeError:
			p="NULL"
		try:
			p2=soup.find("div",class_="wiki").find('dl').get_text().replace(",","").replace("\n","")
		except AttributeError:
			p2="NULL"
		row.append(p)
		row.append(p2)
		try:
			for result in soup.find_all("div",class_="result"):
				row.append(result.find("span").text)
		except AttributeError:
			row.append("Null")
		data.append(row)
		print(data)
		time.sleep(1)  # throttle so the site is not hammered
	except Exception:
		cun_cou(url,"cuo3")
	return data
# Collect and store every girl-name detail URL.
def get_all_girlurl():
	"""Scrape the girl-name index for every initial letter into girlurl.csv.

	Bug fix: the original hand-written letter list skipped 'e', silently
	dropping every name beginning with E.
	"""
	for letter in "abcdefghijklmnopqrstuvwxyz":
		url = "https://www.babynamewizard.com/baby-name/girl/%s" % letter
		cun(parse_HTML(url), "girlurl")

# Collect and store every boy-name detail URL.
def get_all_boyurl():
	"""Scrape the boy-name index for every initial letter into boyurl.csv.

	Bug fix: the original hand-written letter list skipped 'e', silently
	dropping every name beginning with E.
	"""
	for letter in "abcdefghijklmnopqrstuvwxyz":
		url = "https://www.babynamewizard.com/baby-name/boy/%s" % letter
		cun(parse_HTML1(url), "boyurl")
def get_alldata():
	"""Scrape the detail page for every URL in boyurl.csv into data2.csv.

	URLs that fail are appended to bcuo4.csv so the run keeps going and
	failures can be retried later.  Returns the list of URLs processed.
	"""
	urls=get_WebLinkcsv("boyurl")
	for url in urls:
		try:
			cun_web(parse_content(url),'data2')
		except Exception:
			# record the failing URL and continue with the rest
			cun_cou(url,"bcuo4")
	return urls
if __name__ == "__main__":
	# Entry point: scrape detail data for every boy-name URL already stored in boyurl.csv.
	get_alldata()
	