import requests
import re
from bs4 import BeautifulSoup

#获取所有科室的ID
#Fetch the site's front page and return the ID of every hospital department.
def get_all_code():
	headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'}
	page = requests.get('http://ask.39.net/', headers=headers).text
	#Each department link has the form /browse/all_<id>.html; capture the <id> part.
	return re.findall(r'<a href="/browse/all_(.*?).html"', page)

#获取所有疾病的id
#Given one department ID, return the disease IDs listed on its browse page.
def get_dis_code(all_code):
	headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'}
	page = requests.get('http://ask.39.net/browse/all_{}.html'.format(str(all_code)), headers=headers).text
	#Only the first .tg-box section holds the disease links for this department.
	section = BeautifulSoup(page, 'lxml').select(' .tg-box ')[0]
	return re.findall(r'<dd><a href="/browse/(.*?).html"', str(section))

#获取所有问题的id
#Collect every question ID under one disease, paging until an empty page.
#Raises IndexError when the paging code cannot be found (caller catches it).
def get_question_code(dis_code):
	url = 'http://ask.39.net/browse/{}.html'.format(dis_code)    #link that lists all question codes
	headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'}
	htm = requests.get(url,headers=headers).text
	#The paging code appears in one of two anchor styles; exactly one group matches.
	patten = r'<div class="J_check_more check-more.*?href="/browse/(.*?)-1-1.html|<a class="active" href="/browse/(.*?)-1-1.html">'
	groups = re.findall(patten, htm, re.S)[0]
	#findall with two groups yields a tuple with one empty slot; take the non-empty
	#group directly.  (The original stringified the tuple and stripped quotes, commas,
	#parens and spaces — which corrupts any code containing those characters.)
	code = groups[0] or groups[1]
	codes = []
	page = 1
	while True:
		urls = 'http://ask.39.net/browse/{}-1-{}.html'.format(str(code), str(page))   #page of questions
		html = requests.get(urls, headers=headers).text
		#All question IDs on this page
		id_list = re.findall(r'href="/question/(.*?).html', html)
		if not id_list:   #an empty page means we walked past the last one
			break
		codes.extend(str(id_) for id_ in id_list)
		page += 1
	return codes

#获取问答内容
#Fetch one question page and return (title, asker info, body, time, answers),
#where answers is a list of [doctor info, answer text, answer time] triples.
def get_content(question_code):
	url = 'http://ask.39.net/question/{}.html'.format(str(question_code))
	headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'}
	htm = requests.get(url,headers=headers).text
	html = BeautifulSoup(htm,'lxml')
	question_title = html.select(' .sub_here ')[0].get_text().replace(' ','').replace('\n','')  #question title
	question_people = html.select(' .mation ')[0].get_text().replace('\n','').replace(' ','')   #asker info
	question_content = html.select(' .txt_ms ')[0].get_text().replace('\n','').replace(' ','')  #question body
	question_time = html.select(' .txt_nametime > span')[1].get_text().replace('\n','').replace(' ','')  #question time
	#Walk the three parallel answer node lists in lockstep; zip stops at the
	#shortest list, which is what the original index-probing loop did — but
	#without a bare except: hiding every unrelated error.
	answer = []
	for doc, sele, tm in zip(html.select(' .doc_txt '),
	                         html.select(' .sele_txt '),
	                         html.select(' .doc_time ')):
		answer_doctor = doc.get_text().replace(' ','').replace('\n',' ')   #doctor info
		answer_content = sele.get_text().replace(' ','').replace('\n','')  #answer text
		answer_time = tm.get_text().replace(' ','')  #answer time
		answer.append([answer_doctor,answer_content,answer_time])
	return question_title,question_people,question_content,question_time,answer
#保存
#Append one scraped question to questions.txt: four header lines (title, asker,
#body, time), then three lines per answer, then a blank separator line.
def get_save(questions):
	#with-statement closes the file even if a write fails; explicit utf-8 avoids
	#the platform default codec choking on scraped Chinese text.
	with open('questions.txt', 'a', encoding='utf-8') as file:
		file.write(str(questions[0])+'\n')
		file.write(str(questions[1])+'\n')
		file.write(str(questions[2])+'\n')
		file.write(str(questions[3])+'\n')
		for answer in questions[4]:
			file.write(str(answer[0]+'\n'))
			file.write(str(answer[1]+'\n'))
			file.write(str(answer[2]+'\n'))
		file.write('\n')
	print(questions[0])  #progress indicator
if __name__ == '__main__':
	#Crawl: department -> disease -> question -> save.
	for all_code in get_all_code():   #department IDs
		for dis_code in get_dis_code(all_code):  #disease IDs
			try:
				for question_code in get_question_code(dis_code):  #question IDs
					questions = get_content(question_code)  #fetch Q&A content
					get_save(questions)  #persist
			except Exception as e:
				#The original bare except also swallowed KeyboardInterrupt;
				#catch Exception so Ctrl-C still stops the crawl, report the
				#failure, and keep the original skip-this-department behaviour.
				print('skipping {}: {}'.format(dis_code, e))
				break
	
				