#!/usr/bin/python
# -*- coding: utf-8 -*-


import requests
import time
from bs4 import BeautifulSoup

# HTTP request headers that mimic a desktop Chrome browser so the
# server treats us like a normal visitor.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Accept': 'text/html;q=0.9,*/*;q=0.8',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
    'Accept-Encoding': 'gzip',
    'Connection': 'close',
    'Referer': 'http://www.baidu.com/link?url=_andhfsjjjKRgEWkj7i9cFmYYGsisrnm2A-TN3XZDQXxvGsM9k9ZZSnikW2Yds4s&amp;amp;wd=&amp;amp;eqid=c3435a7d00146bd600000003582bfd1f',
}

# Target page: Sina Finance 7x24 live news feed.
spider_url = "http://finance.sina.com.cn/7x24/"
# A timeout is required: requests.get() without one can block forever
# on a stalled connection, hanging the whole script.
rq = requests.get(url=spider_url, headers=headers, timeout=10)
print("StatusCode:", str(rq.status_code), ", Encoding:", rq.encoding)


# Parse the fetched page and pull out the two parallel lists of
# <p> elements: one holding timestamps, one holding the news text.
sf = BeautifulSoup(rq.content, 'html.parser')
timepoints = sf.find_all('p', class_='bd_i_time_c')
news = sf.find_all('p', class_='bd_i_txt_c')
print(f"Timepoints-len: {len(timepoints)} ,news-len: {len(news)}")
    
# Print each timestamp/headline pair.
# Bug fix: the original did `"Time: " + timepoints[i]`, concatenating a
# str with a bs4 Tag object, which raises TypeError on the first
# iteration. Extract the text with get_text() instead. zip() also
# protects against an IndexError when the two lists differ in length.
for tp, item in zip(timepoints, news):
    print("=================================================")
    print("Time: " + tp.get_text(strip=True))
    print("content: " + item.get_text(strip=True))
    print("=================================================")

