"""

@Author : Lee Yucheng
@Contact : 2925168463@qq.com
@Project : GlidSky_Rank_Of_SecondRank_IP
@File : RankOfIP2.py
@Software : Visual Studio Code
"""

import requests
import re
import time
from config import proxy
from bs4 import BeautifulSoup
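# NOTE: `config` is a local module of this project (not a PyPI package); it is
# assumed to expose proxy.GetProxy() -- see the hedged sketch after get_time() below.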

def get_time():
    """Second-resolution Unix timestamp, used for the Hm_lpvt analytics cookie below."""
    return round(time.time())
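
# --- Hedged sketch: a minimal stand-in for the missing config.proxy module ---
# `from config import proxy` above pulls in a helper that is not included with
# this file. Assuming GetProxy() only needs to return a requests-style proxies
# dict chosen from a pool of proxy addresses, a stand-in could look like the
# function below. The pool contents, the random-choice strategy, and the name
# _example_get_proxy are illustrative assumptions, not the author's code.
def _example_get_proxy():
    """Illustrative only: pick a proxy and return it in the dict format requests expects."""
    import random
    # Placeholder addresses; a real pool would hold working proxy endpoints.
    pool = ["http://127.0.0.1:8080", "http://127.0.0.1:8081"]
    addr = random.choice(pool)
    return {"http": addr, "https": addr}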

data = {}
url_login = "http://glidedsky.com/login"
r = requests.get(url_login, timeout=5)

# The CSRF token is embedded in a <meta> tag on the login page.
data['_token'] = re.findall(r'<meta name="csrf-token" content="(.*?)">', r.text)[0]
# Username and password redacted.
data['email'] = '------'
data['password'] = '-------'

cookies = r.cookies.get_dict()
# Splice the cookies set by the login-page GET (plus the site's analytics cookies,
# stamped with the current time) into a hand-built Cookie header for the login POST.
HEADER = {'Cookie': 'Hm_lvt_020fbaad6104bcddd1db12d6b78812f6=1565850527; _ga=GA1.2.438446912.1565850527; _gid=GA1.2.1529928071.1565850527; _gat_gtag_UA_75859356_3=1; Hm_lpvt_020fbaad6104bcddd1db12d6b78812f6=%s; footprints=%s; XSRF-TOKEN=%s; glidedsky_session=%s' % (get_time(), cookies['footprints'], cookies['XSRF-TOKEN'], cookies['glidedsky_session'])}

s = requests.Session()

res = s.post(url_login, headers=HEADER, data=data, timeout=5)

# A successful login lands on a page that contains the word "爬虫" (crawler).
if "爬虫" in res.text:
    print("Login succeeded")
else:
    print("Login failed")

sum_num = 0
page = 1
# Crawl pages 1-1000 of the challenge and sum every number found.
while page <= 1000:
    ip2url = "http://glidedsky.com/level/web/crawler-ip-block-2?page=%s" % page
    try:
        # Each request goes out through a fresh proxy to dodge the per-IP block.
        ipdata = s.get(ip2url, proxies=proxy.GetProxy(), timeout=5)
    except requests.RequestException:
        continue  # retry the same page with another proxy
    if ipdata.status_code != 200:
        continue  # retry the same page with another proxy
    soup = BeautifulSoup(ipdata.text, "html.parser")
    nums = soup.find_all("div", attrs={"class": "col-md-1"})
    for num in nums:
        sum_num += int(num.text.strip())
    print(page)
    page += 1
print(sum_num)

# Result: 2846782
