#!/usr/bin/python
# -*- coding: utf-8 -*-

import requests
import time
from bs4 import BeautifulSoup


# Query parameters: search keyword ('wd') and results per page ('rn').
payload = {'wd': 'Python爬虫', 'rn': '20'}
# timeout keeps the script from hanging forever on a stalled connection
rq = requests.get("http://www.baidu.com/s", params=payload, timeout=10)
rq.raise_for_status()  # fail fast on HTTP errors instead of scraping an error page

print("URL: ", rq.url)
print("StatusCode:", str(rq.status_code), ", Encoding:", rq.encoding)

# Parse the fetched page content.
html = rq.content
bd = BeautifulSoup(html, 'html.parser')

# Select each search-result container div. NOTE(review): the trailing space in
# the class string is kept from the original selector — an exact class-attribute
# match requires it; verify against Baidu's current markup.
blocks = bd.find_all('div', attrs={'class': 'result c-container '})

for block in blocks:
    # Guard: skip result blocks missing the expected title markup instead of
    # crashing with AttributeError on block.h3.get_text().
    if block.h3 is None:
        continue
    print(block.h3.get_text())
    # Guard: only print the link when an <a> with an href is actually present.
    link = block.find('a')
    if link is not None and link.has_attr('href'):
        print(link['href'])

