# -*- coding:utf-8 -*-
import urlparse,re

from bs4 import BeautifulSoup

from http.request import Request
from items import Item
import settings
import funcs

class Spider(object):
    """Parses fetched pages: extracts outgoing links and packages page data as Items."""

    def __init__(self):
        pass

    def on_get_link(self, soup, field, href):
        """Collect the non-empty, stripped values of attribute *href* from every *field* tag in *soup*."""
        candidates = (tag.get(href, '').strip() for tag in soup.find_all(field))
        return [link for link in candidates if link]

    def get_link(self, soup):
        """Gather anchor, stylesheet and script URLs found in *soup*.

        Returns (soup, anchor_urls, css_urls, js_urls, img_urls);
        image extraction is currently disabled, so img_urls is always [].
        """
        anchor_urls = self.on_get_link(soup, 'a', 'href')
        stylesheet_urls = self.on_get_link(soup, 'link', 'href')
        script_urls = self.on_get_link(soup, 'script', 'src')
        image_urls = []  # disabled: self.on_get_link(soup, 'img', 'src')
        return soup, anchor_urls, stylesheet_urls, script_urls, image_urls

    def parse(self, response):
        """Parse a fetched response: yield a Request per discovered link, then one Item.

        Asset responses (extension in settings.S_img_ext, or css/js) are stored
        as-is without HTML parsing.
        """
        ext = funcs.get_url_ext(response.url)  # file extension of the requested URL
        is_asset = ext in settings.S_img_ext or ext in ('css', 'js')

        if is_asset:
            item = Item()
            item.url = response.url
            item.soup = None
            item.content = response.body                # raw body, unmodified
            item.coding = ''                            # content encoding (unknown here)
            item.file_length = int(len(response.body))  # original file size
            yield item
            return

        decoded, coding = funcs.decode_data(response.body)
        soup = BeautifulSoup(str(decoded), 'lxml', from_encoding='utf-8')
        soup, anchor_urls, css_urls, js_urls, img_urls = self.get_link(soup)

        # Same concatenation order as collected: stylesheets, scripts, anchors, images.
        for raw_url in css_urls + js_urls + anchor_urls + img_urls:
            vurl = funcs.valid_url(response.url, raw_url).strip()  # '' means the link is invalid
            if vurl != '':
                normalized = funcs.decode_data(vurl)[0].encode('utf-8')
                if normalized:
                    vurl = normalized
                yield Request(vurl)

        item = Item()
        item.url = response.url
        item.soup = soup
        item.content = str(soup)                    # serialized (possibly modified) soup
        item.coding = coding                        # detected content encoding
        item.file_length = int(len(response.body))  # original file size
        yield item
        
if __name__ == '__main__':
    # Manual smoke test: resolve/validate one URL against a page URL.
    s = Spider()
    s2 = 'http://forum.csdn.net/PointForum/Forum/TopicList.aspx?Alias=OL_Script&ListType=UnClosedList&page=1#sdf'
    # BUG FIX: Spider has no valid_url method -- the helper lives in funcs
    # (see its use inside Spider.parse). Calling s.valid_url raised AttributeError.
    # Parenthesized print behaves identically for a single argument on Python 2 and 3.
    print(funcs.valid_url(s2, 'http://forum.csdn.net/PointForum/Forum/TopicList.aspx?Alias=OL_Script&ListType=UnClosedList&page=2'))



















