import requests
from bs4 import BeautifulSoup
import re
import urllib
import time
import urllib2
import time
# Browser-like User-Agent so the target site doesn't reject the crawler.
# The header name must be 'User-Agent' (hyphenated): 'user_agent' is not a
# real HTTP header, so the server would ignore it and see a bare client.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}

# Crawl the listing pages of the Hupu "bxj" board and collect topic links.
# NOTE(review): range(0, 1) fetches only page 0 -- widen to crawl more pages.
hrefs = []  # hoisted out of the loop so links from every page accumulate
for i in range(0, 1):
    url = "https://bbs.hupu.com/bxj-" + str(i)
    print(url)
    try:
        # `headers` must be passed by keyword: the second positional
        # argument of requests.get() is `params`, not `headers`, so the
        # original call sent no custom headers at all.
        r = requests.get(url, headers=headers, timeout=10)
    except requests.exceptions.RequestException as e:
        # requests raises its own exception hierarchy; the original
        # urllib.URLError / urllib2.HTTPError handlers could never fire.
        print(e)
        continue  # skip this page -- `r` would be unbound below otherwise
    print("%d status: %d" % (i + 1, r.status_code))
    soup = BeautifulSoup(r.text, 'html.parser')
    for link in soup.find_all('a', class_="aulink"):
        hrefs.append(link.get('href'))

# Visit each collected topic page and print the poster's profile block.
for href in hrefs:
    try:
        # Keyword `headers=` (positional would be treated as `params`),
        # and catch requests' own exception type, not urllib's.
        r = requests.get(href, headers=headers, timeout=10)
    except requests.exceptions.RequestException as e:
        print(e)
        continue  # don't re-parse the previous page's stale HTML
    soup = BeautifulSoup(r.text, 'html.parser')
    for panel in soup.find_all('div', class_='personal_right'):
        name = panel.find('div', class_='left')
        info = panel.find('div', class_='personalinfo')
        if name is None or info is None:
            continue  # layout variant without the expected divs
        # The profile text uses runs of four spaces as field separators;
        # turn them into newlines for readable output.
        info_parser = info.text.replace('    ', '\n')
        print("********************")
        print(name.text.strip())
        print(info_parser.strip())
        print("********************")




 
 

