#encoding=utf-8
import scrapy
import time
from web_spider.items import newsItem
"""布隆过滤器"""
from pybloomfilter import BloomFilter
from WebSite import WebSite
"""用于分类"""
#from Classifier_controller import Classifier_controller
from WebPage.models import webpage
"""用于去重"""
from Simhash_with_weight import Simhash
"""用于网页的正文提取"""
from cx_extractor import CxExtractor
import urlparse
import chardet

def LevelIsVaild(level):
    """Return True when crawling may continue from this depth.

    Only seed pages (level <= 0) have their links followed, so the
    effective crawl depth is 1.
    """
    return not level > 0

class newsSpider(scrapy.Spider):

    name = "newsSpider"

    def __init__(self, category=None, *args, **kwargs):
        super(newsSpider, self).__init__(*args, **kwargs)
        self.bf = BloomFilter(10000000, 0.01, 'bloom UrlFilter')
        self.allowed_domains = list()
        self.start_urls = list()
        self.ws = WebSite()

        for ad in self.ws.get_allowed_domains():
            self.allowed_domains.append(ad)

        for su in self.ws.get_start_urls(): 
            self.start_urls.append(su)
        """对象cc处理分类"""
        #self.cc = Classifier_controller()
        self.extractor = CxExtractor()

    def parse(self, response):
        
        """如果页面是新的"""
        if not self.bf.add(response.url):          
            item = newsItem()
            level = response.meta.get('level', 0)

            """仅下载有标题的网页"""
            if response.xpath("//title/text()").extract():
                print response.url

                """提取网页的正文部分"""
                try:
                    unicode_text = response.body.decode(response.encoding)
                except UnicodeDecodeError:
                    unicode_text = response.body.decode(chardet.detect(response.body)['encoding'])

                text = self.extractor.get_text(unicode_text)
                item['text'] = text

                if len(item['text']) >= 500:
                    hash_value = Simhash(item['text'])
                    pages = webpage.objects.all()

                    """如果网页没有内容重复"""
                    dup_flag = False
                    for page in pages:
                        if hash_value.isDuplicate(page.hash_value):
                            dup_flag = True
                            break
                    if not dup_flag:
                        item['intro'] = item['text'][:100]
                        item['title'] = response.xpath('/html/head/title/text()').extract()[0]
                        item['link'] = str(response.url)
                        item['dow_time'] = time.strftime('%Y-%m-%d %H:%M', time.localtime(time.time()))
                        item['website'] = ''
                        item['hash_value'] = hash_value.__float__()            
                        print item['text']
                        yield item

                for url in response.xpath('//a/@href').extract():
                    abs_url = urlparse.urljoin(response.url, url.strip())
                    if LevelIsVaild(level):
                        yield scrapy.Request(abs_url, callback=self.parse, meta={'level': level + 1})

    
    def closed(self, reason):
        print "------------------>3<----------------------"
