# -*- coding: utf-8 -*-

# Target blog URLs. The first one returns HTTP 403 to clients that do not
# present browser-like headers; the second is used by the demo requests below.
url = "http://blog.csdn.net/yuanmeng001"
url1 = "http://blog.csdn.net/happydeer"
# Dead code kept for reference: a bare urllib.urlopen() call gets a 403
# response, showing that the site blocks obvious crawlers.
'''

import urllib
print urllib.urlopen(url).read()
#结果是403，表明禁止爬虫
'''
# Simple simulated browser visit (experiments below).
'''
简易的模拟访问
'''
import urllib2

'''
req = urllib2.Request(url1)
req.add_header("User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36")
req.add_header("GET",url)
req.add_header("Host","blog.csdn.net") #主机
req.add_header("Referer","http://blog.csdn.net/")#从哪里访问来的，一般说是这个网页只能从它的网页内部的网页访问

多次访问会呗屏蔽
'''


'''

另外一种写法
my_headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
           "Host": "blog.csdn.net",#主机
           "Referer": "http://blog.csdn.net/",#从哪里访问来的，一般说是这个网页只能从它的网页内部的网页访问
            "GET":url
           }

req = urllib2.Request(url1,headers=my_headers)
html = urllib2.urlopen(req)
#print html.read()

print html.headers.items() #查看访问的头部
'''


# Rotate the requesting identity by picking one of these browser User-Agent
# strings per request, so repeated fetches are less likely to be blocked.
'''
更换访问机器，拼接成新的访问，防止提交次数过多被屏蔽
'''
my_headers = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60",
    "Opera/8.0 (Windows NT 5.1; U; en)",
    "Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16",
]
import random
def get_content(url, headers):
    '''
    Fetch a page that serves 403 to bare clients by spoofing browser headers.

    :param url: the URL to fetch
    :param headers: list of User-Agent strings; one is chosen at random
    :return: the response body as a string
    '''
    random_header = random.choice(headers)

    # bug fix: the original requested the module global url1, ignoring the
    # url parameter entirely.
    req = urllib2.Request(url)
    # bug fix: the original passed the whole `headers` list here instead of
    # the randomly chosen User-Agent string.
    req.add_header("User-Agent", random_header)
    req.add_header("Host", "blog.csdn.net")  # host of the target blog
    # Some pages only allow access when the referer is an on-site page.
    req.add_header("Referer", "http://blog.csdn.net/")
    # NOTE: the original also sent add_header("GET", url); "GET" is an HTTP
    # method, not a header name, so that bogus header has been dropped.
    content = urllib2.urlopen(req).read()
    # bug fix: the original line was `return #content`, which always
    # returned None and threw away the downloaded page.
    return content

#print get_content(url,my_headers)

