#!/usr/bin/env python
# -*- coding: utf-8 -*-


import requests as rq
import sys
import  headers as Headers
from md5 import *
import os

from bs4 import BeautifulSoup as bs


reload(sys)
sys.setdefaultencoding( "utf-8" )

'''
爬虫类
爬取给定的url，并调用JS解析库解析之后，将最终的HTML保存在html中
'''
class Spider (object):
    headers = Headers.headers 
    urls = [
       'http://haijuw.com/'
    ]
 
    
    def run(self):
        for url in self.urls :
 
           s = rq.Session()
           s.get(url)
           s.headers.update(self.headers)
           response = s.get(url);
           
           self.parse(s,response,bs)
          
    '''
    @esponse 响应对象
    '''
    
    def parse(self,request,response,bs):
        print request
        print response
        soup = bs(response.text,'html.parser')
        img_list = soup.find_all('img')
        print img_list
        
        pass

if __name__ == '__main__':

  example = Spider()
  response =  example.run()
