"""Scraper for bbs.hupu.com: collects board authors and counts their followers."""
import re
import time
import urllib

import requests
from bs4 import BeautifulSoup

# NOTE(review): the original also did `import urllib2` (twice imported `time`);
# urllib2 does not exist on Python 3 and made the whole module unimportable.
# Error handling below relies on requests' own exception types instead.

# Header key must be the real HTTP header name 'User-Agent'
# (the original 'user_agent' key is ignored by servers).
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}

def get_page(link):
    """Return the number of follower-list pages for a user profile URL.

    Fetches ``link + "/follower"`` and scrapes the page count out of the
    pagination widget. Returns 1 when the request fails or no count is found.
    """
    url = link + "/follower"
    try:
        # headers must be passed as a keyword argument; the original passed
        # the dict positionally, which requests interprets as query params.
        r = requests.get(url, headers=headers, timeout=10)
    except requests.exceptions.RequestException as e:
        # The original caught urllib2.HTTPError, which requests never raises,
        # and then hit a NameError because `html` was left unbound.
        print(e)
        return 1
    html = r.text
    page_num = re.findall(r'<a .*>(.*?)</a><input .*>', html)
    print(page_num)
    if page_num:
        return int(page_num[0])
    return 1

def get_href(url, page_num):
    """Collect unique follower-profile hrefs from all follower pages of `url`.

    url: profile base URL; page_num: number of follower pages to walk.
    Returns the hrefs with duplicates removed, first-seen order preserved.
    """
    seen = set()
    fans_filter = []
    for page in range(1, page_num + 1):
        url_fans = url + "/follower?page=" + str(page)
        try:
            r = requests.get(url_fans, headers=headers, timeout=10)
        except requests.exceptions.RequestException as e:
            # Skip this page; the original fell through and re-parsed the
            # previous page's html (or raised NameError on the first page).
            print(e)
            continue
        print(url_fans + " status:", r.status_code)
        soup = BeautifulSoup(r.text, 'html.parser')
        for link in soup('a', class_='u'):
            href = link.get('href')
            # O(1) set membership instead of re-scanning a growing list
            # for every page (the original dedup loop was quadratic).
            if href not in seen:
                seen.add(href)
                fans_filter.append(href)
    return fans_filter

def get_info(hrefs):
    """Fetch each follower profile URL and print its name and personal info."""
    for href in hrefs:
        try:
            r = requests.get(href, headers=headers, timeout=10)
        except requests.exceptions.RequestException as e:
            # The original caught the wrong exception type and then used a
            # stale/unbound `html`; skip the broken profile instead.
            print(e)
            continue
        soup = BeautifulSoup(r.text, 'html.parser')
        for block in soup.find_all('div', class_='personal_right'):
            name = block.find('div', class_='left')
            info = block.find('div', class_='personalinfo')
            if name is None or info is None:
                # Layout changed or partial page: nothing printable here.
                continue
            # The site pads fields with four spaces; turn them into newlines.
            info_parser = info.text.replace('    ', '\n')
            print("********************")
            print(name.text.strip())
            print(info_parser.strip())
            print("********************")

# Crawl the first 10 pages of the bxj board, collect the unique author
# profile links, then report each author's follower count.
href_author = []
author_filter = []
for page in range(1, 11):
    # The original fetched bxj-0..bxj-9 while printing page numbers 1..10;
    # board pages are 1-based, so fetch and report the same page number.
    url = "https://bbs.hupu.com/bxj-" + str(page)
    try:
        r = requests.get(url, headers=headers, timeout=10)
    except requests.exceptions.RequestException as e:
        # Replaces the nonexistent urllib.URLError / unreachable
        # urllib2.HTTPError handlers; skip the page on any request failure.
        print(e)
        continue
    print(str(page), "status:", r.status_code)
    soup = BeautifulSoup(r.text, 'html.parser')
    for link in soup.find_all('a', class_="aulink"):
        href_author.append(link.get('href'))

# Order-preserving de-duplication of the collected author links.
for href_a in href_author:
    if href_a not in author_filter:
        author_filter.append(href_a)

for author in author_filter:
    fans = get_href(author, get_page(author))
    print("Fans sum:" + str(len(fans)))
    #get_info(fans)

 
 

