# -*- coding=utf-8 -*-
import re
import json
import requests
import random
import os
import urllib
import time
from http import cookiejar
from urllib import request,parse
from io import BytesIO
# Test
# Build a request-header dict with a randomly chosen User-Agent.
def get_UserAgent():
    """Return a headers dict carrying one User-Agent picked at random from a fixed pool."""
    agent_pool = (
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
        "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
        "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
        "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
        "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
        "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
        "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
    )
    return {'User-Agent': random.choice(agent_pool)}


# Extract the yunData payload embedded in the page's JavaScript; returns the yunData string.
def get_contentForJs(html):
    """Pull the yunData.setData({...}) argument out of *html* and return it
    re-wrapped in braces as a JSON-like string.

    Raises IndexError when the page contains no yunData.setData call.
    """
    matches = re.findall(r'yunData.setData\({(.*?)}\)', html, re.M | re.S)
    return ''.join(('{', matches[0], '}'))

# Assemble the share-download POST URL.
def get_postUrl(params):
    """Build the pan.baidu.com sharedownload API URL from *params* (a JSON string).

    Reads sign / timestamp / bdstoken out of the decoded dict; missing keys
    become the literal string 'None', matching str(dict.get(...)).
    """
    data = json.loads(params)
    query_parts = (
        'sign=' + str(data.get('sign')),
        'timestamp=' + str(data.get('timestamp')),
        'bdstoken=' + str(data.get('bdstoken')),
        'channel=chunlei',
        'clienttype=0',
        'web=1',
        'app_id=250528',
    )
    return 'https://pan.baidu.com/api/sharedownload?' + '&'.join(query_parts)
	
# Assemble the form fields carried by the download POST request.
def get_postData(params):
    """Return the POST body dict built from *params* (a JSON string).

    KeyError/IndexError propagate when file_list/list is absent or empty,
    just as in the original incremental construction.
    """
    data = json.loads(params)
    first_fs_id = data['file_list']['list'][0].get('fs_id')
    return {
        "encrypt": 0,
        "product": "share",
        "uk": str(data.get("uk")),
        "primaryid": str(data.get("shareid")),
        "fid_list": "[" + str(first_fs_id) + "]",
    }
    
# Send the POST request that resolves the real download address.
def get_downLoad(postUrl, params, proxies):
    """POST *params* (a dict of form fields) to *postUrl* through the given
    HTTP proxy and return the response body decoded as UTF-8.

    proxies: a single "host:port" proxy string. NOTE: the opener is installed
    globally, so later urlopen() calls also go through this proxy.
    """
    print('发送post请求获取真实下载路径...')
    header_dict = get_UserAgent()
    params = parse.urlencode(params).encode(encoding='UTF8')
    # Route the request through the supplied HTTP proxy.
    proxies = {'http': str(proxies)}
    proxy_support = urllib.request.ProxyHandler(proxies)
    opener = urllib.request.build_opener(proxy_support)
    # BUG FIX: addheaders must be a list of (name, value) tuples, not a dict —
    # urllib iterates over it and unpacks each entry as a header pair, so the
    # original dict assignment never attached the User-Agent correctly.
    opener.addheaders = list(header_dict.items())
    # Install globally so subsequent urlopen() calls reuse this opener.
    urllib.request.install_opener(opener)
    req = request.Request(url=postUrl, data=params)
    resp = request.urlopen(req)
    return resp.read().decode(encoding='utf-8')

# File download, variant 1: stream with requests.
def save_file(downloadUrl, saveFilePath):
    """Stream *downloadUrl* via requests and write the payload to *saveFilePath*."""
    print('文件开始下载并保存...')
    header_dict = get_UserAgent()
    with requests.get(downloadUrl, headers=header_dict, timeout=6, stream=True) as web:
        print(web.status_code)
        # Binary write mode keeps the payload byte-exact regardless of encoding.
        with open(saveFilePath, 'wb') as sink:
            for block in web.iter_content(chunk_size=1024):
                sink.write(block)
    print('文件下载完成...')
        
# File download, variant 2: urlretrieve with a progress callback.
def save_file_retrieve(downloadUrl, saveFileName, saveDir='D://downLoad//'):
    """Fetch *downloadUrl* into saveDir/saveFileName via urllib's urlretrieve.

    saveDir keeps the original hard-coded Windows folder as its default for
    backward compatibility; pass a different directory to save elsewhere.
    Schedule (defined below in this module) prints download progress.
    """
    local = os.path.join(saveDir, saveFileName)
    request.urlretrieve(downloadUrl, local, Schedule)

def Schedule(a, b, c):
    """Progress hook for urlretrieve: print the percentage downloaded.

    a: number of blocks transferred so far
    b: size of one block in bytes
    c: total size of the remote file in bytes (servers may report -1/0
       when the length is unknown)
    """
    # BUG FIX: guard the unknown-size case — the original raised
    # ZeroDivisionError for c == 0 and printed negative percentages for c < 0.
    if c <= 0:
        print('0.00%')
        return
    per = 100.0 * a * b / c
    # The last block is usually partial, so the raw ratio can exceed 100%.
    if per > 100:
        per = 100
    print('%.2f%%' % per)
        
# File download, variant 3: plain urlopen read loop.
def get_file(downloadUrl, saveFilePath):
    """Read *downloadUrl* in 8 KiB chunks and write the bytes to *saveFilePath*.

    On an HTTP error the URL is reported and the function returns quietly.
    """
    try:
        remote = request.urlopen(downloadUrl)
        print('文件开始下载并保存...')
        chunk_size = 8192
        with open(saveFilePath, 'wb') as sink:
            # iter() with a b'' sentinel stops exactly where the original
            # "read until falsy buffer" while-loop stopped.
            for chunk in iter(lambda: remote.read(chunk_size), b''):
                sink.write(chunk)
        print('文件下载完成...')
    except urllib.error.HTTPError:
        # Matched a URL that does not actually exist: report and bail out.
        print(downloadUrl, "url file not found")
    return

# Fetch a batch of free proxy IPs from 66ip.cn.
def get_ipAgent(numCount):
    """Request *numCount* free proxies from www.66ip.cn.

    Returns a list of scraped proxy strings on success, None on any
    network/parse failure (and also implicitly None on a non-200 status).
    """
    header_dict = get_UserAgent()
    ipSxb = '中国'
    # The site expects the region value gb2312-encoded then URL-quoted.
    ipSxb = request.quote(ipSxb.encode('gb2312'))
    print(ipSxb)
    urlDemo = 'http://www.66ip.cn/mo.php?'
    params = 'sxb=' + str(ipSxb)            # region
    params += '&tqsl=' + str(numCount)      # how many IPs to return
    params += '&port='                      # port filter (unused)
    params += '&export='                    # ports to exclude (unused)
    params += '&ktip='                      # leading-IP filter (unused)
    params += '&sxa='                       # carrier filter (unused)
    params += '&submit=%CC%E1++%C8%A1'      # URL-encoded submit-button value
    params += '&textarea='
    try:
        response = requests.get(urlDemo + params, headers=header_dict, timeout=5)
        response.encoding = response.apparent_encoding
        if response.status_code == requests.codes.ok:
            # Each proxy sits between the page's \r\n\t\t indentation and a <br />.
            res_str = r'\r\n\t\t(.*?)<br />'
            return re.findall(res_str, response.text, re.S | re.M)
    except Exception:
        # BUG FIX: the original `return null` raised NameError inside the
        # handler; return None instead. Bare `except:` also narrowed.
        print('没有找到数据')
        return None

# Filter out working proxy IPs — earlier broken draft kept commented out below; working check_ip follows.
#def check_ip(ip):
#	header_dict = get_UserAgent()
#	params = json.dumps({'ipadd':'' + str(ip) + ''})
#	try:
#	    response = requests.post("http://www.66ip.cn/yz/post.php",headers = header_dict,data=params,timeout = 5)
#		response.encoding = response.apparent_encoding
#		if response.status_code == requests.codes.ok:
#		    print(response.text)           
#    except:
#    print("asd")

# Validate one proxy IP against 66ip.cn's checker endpoint.
def check_ip(ip):
    """POST *ip* to the 66ip.cn validator and print the verdict.

    Network errors are reported instead of raised, so one dead proxy does
    not abort the caller's loop (the commented-out draft above shows this
    try/except was the original intent).
    """
    header_dict = get_UserAgent()
    params = json.dumps({'ipadd': str(ip)})
    try:
        response = requests.post("http://www.66ip.cn/yz/post.php", headers=header_dict, data=params, timeout=5)
    except Exception as exc:
        # ROBUSTNESS FIX: timeouts/connection errors previously propagated
        # and killed the whole crawl.
        print('check_ip failed:', exc)
        return
    response.encoding = response.apparent_encoding
    if response.status_code == requests.codes.ok:
        print(response.text)

# Program entry point.
# TODO: 1) detect the downloaded file's type  2) smarter proxy-IP handling  3) multithreaded crawling
if __name__ == '__main__':
	print('爬虫开始...')
	IpList = get_ipAgent(20)
	print(IpList)
	# ROBUSTNESS FIX: get_ipAgent returns None (or an empty list) when the
	# proxy site is unreachable; iterating None raised TypeError here.
	if IpList:
		for item in IpList:
			check_ip(item)
			time.sleep(random.random() * 3)  # random pause between checks
		
	

		


