#! /usr/bin/env python
# -*- coding: utf8 -*-
#
#@author www
#@date 2014-06-21
#
#crawl zei6.net
#
#

from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from gaodeha_crawl.common.db_utils import DbUtils 
from gaodeha_crawl.common.constant import *
from gaodeha_crawl.items import PostItem
from scrapy import log
import time
import datetime
import re
import random

class Zei6Spider(CrawlSpider):
    """Spider that crawls joke/article posts from zei6.net into PostItem objects.

    Each start URL is mapped (via ``category_urls``) to a category id/name and
    a pool of pen names, one of which is picked at random as the post author.
    Categories missing from ``tb_category`` are inserted when the spider starts.
    """
    name = 'zei6'
    allowed_domains = ['zei6.net']
    start_urls = ['http://www.zei6.net/qiushi/', 'http://www.zei6.net/wangwen/']
    # Maximum number of characters kept in the generated post brief.
    max_brief_index = 256
    # Per-start-URL category metadata; ``author`` is a pool of pen names.
    category_urls = {
                "http://www.zei6.net/qiushi/" : {
                        "category_id": "100000",
                        "category_name": "糗事精选",
                        "author": ["糗女","小蘑菇", "盖地虎"],
                    },
                "http://www.zei6.net/wangwen/" : {
                        "category_id": "101000",
                        "category_name": "无奇不有",
                        "author": ["往事莫回首","笑看人生"],
                    }
            }
    rules = (
             # Follow list (pagination) pages; parse each detail page.
             Rule(SgmlLinkExtractor(allow='/qiushi/list.*\.html', allow_domains=['zei6.net']), follow=True),
             Rule(SgmlLinkExtractor(allow='/qiushi/[\d/\.]*\.html', allow_domains=['zei6.net']), callback='parse_item'),
             #Rule(SgmlLinkExtractor(allow='/wangwen/list.*\.html', allow_domains=['zei6.net']), follow=True),
             #Rule(SgmlLinkExtractor(allow='/wangwen/[\d/\.]*\.html', allow_domains=['zei6.net']), callback='parse_item'),
            )


    def __init__(self):
        """Open the DB connection and make sure every configured category
        exists in ``tb_category``, inserting it when absent."""
        CrawlSpider.__init__(self)
        self.db = DbUtils()
        sql = "select category_id, category_name from tb_category where category_show_status=1 and category_id=%s"
        for url, category_value in self.category_urls.items():
            category = self.db.fetchOneDict(sql, (category_value["category_id"],))
            if not category:
                # Category is not in the table yet (or not shown) -- create it.
                tsql = "insert into tb_category (category_id, category_name) values (%s, %s)"
                self.db.executeSql(tsql, (category_value["category_id"], category_value["category_name"]))


    def parse_item(self, response):
        """Parse one detail page into a PostItem.

        Returns None (the page is dropped) when no title can be extracted,
        instead of raising IndexError as the original code did.
        """
        log.msg("crawl parse url:" + response.url, level=log.DEBUG)
        selector = Selector(response)
        postTitle = selector.xpath('//div[@class="m-detail_top"]/h2/text()').extract()
        if not postTitle:
            # Page layout did not match the XPath; skip instead of crashing.
            log.msg("crawl no title url:" + response.url, level=log.ERROR)
            return None
        item = PostItem()
        item['post_title'] = postTitle[0]
        item['post_content'] = self.parse_item_content(selector)
        item['post_type'] = POST_TYPE_TEXT
        item['post_author'] = self.parse_item_author(response.url)
        item['post_status'] = POST_STATUS_CHECK
        item['post_brief'] = self.parse_item_brief(item['post_content'])
        item['comment_count'] = 0
        item['praise_count'] = 0
        item['hate_count'] = 0
        item['collect_count'] = 0
        item['share_count'] = 0
        item['post_category_id'] = self.parse_item_category_id(response.url)
        item['post_category_name'] = self.parse_item_category_name(response.url)
        item['post_origin_url'] = response.url
        item['post_origin_sitename'] = '贼6网'
        # The source URL doubles as a stable de-duplication key.
        item['post_uuid'] = response.url
        item['post_insert_time'] = int(time.time())
        item['post_show_time'] = self.parse_item_time(selector)
        item['post_update_time'] = int(time.time())
        item['post_font_images'] = ''
        item['post_vedio_url'] = ''
        item['post_tags'] = ''
        item['top_order'] = 100
        return item

    def parse_item_content(self, selector):
        """Extract the article text and wrap every non-empty line in <p> tags.

        Promotional boilerplate (WeChat-follow and source notices) is stripped
        out. Returns '' when no text nodes are found (the original implicitly
        returned None in that case).
        """
        textNode = selector.xpath('//div[@class="m-detail_article"]/text()|//div[@class="m-detail_article"]/p/text()|//div[@class="m-detail_article"]/div/text()|//div[@class="m-detail_article"]/span/text()').extract()
        parts = []
        for node in textNode or []:
            if not node.strip():
                continue
            node = re.sub(u"[(（]\s*关注微信账号[^)]+[)）]", "", node)
            node = re.sub(u"[(（]\s*本文来自[^)]+[)）]", "", node)
            node = node.strip()
            if node:
                parts.append('<p>' + node + '</p>')
        # join() avoids the quadratic += concatenation of the original.
        return ''.join(parts)

    def parse_item_brief(self, content):
        """Return *content* truncated to roughly ``max_brief_index`` chars.

        The cut point is pushed forward past '<', '/', 'p' and '>' characters
        so the brief does not end in the middle of a '</p>' tag. The advance
        is bounded by len(content) -- the original could index past the end
        of the string and raise IndexError when the tail consisted entirely
        of those tag characters.
        """
        if not content:
            return ''
        if len(content) <= self.max_brief_index:
            return content
        end = self.max_brief_index
        while end < len(content) and content[end] in '</p>':
            end = end + 1
        return content[:end] + "..."


    def parse_item_time(self, selector):
        """Return the post's publish date as a unix timestamp.

        Falls back to the current time when the source element is missing
        or no YYYY-MM-DD date can be found in it (the original raised
        IndexError on a missing element).
        """
        post_show_time = int(time.time())
        postMeta = selector.xpath('//div[@class="artFrom"]/text()').extract()
        if postMeta:
            m = re.search(r'\d{4}-\d{2}-\d{2}', postMeta[0])
            if m:
                timeStr = m.group(0)
                post_show_time = int(time.mktime(datetime.datetime.strptime(timeStr, '%Y-%m-%d').timetuple()))
        return post_show_time


    def parse_item_author(self, url):
        """Pick a random pen name from the category pool matching *url*."""
        for crawlUrl in self.category_urls:
            if url.startswith(crawlUrl):
                return random.choice(self.category_urls[crawlUrl]["author"])
        # Unknown category: fall back to a default pen name.
        return "小蘑菇"

    def parse_item_category_id(self, url):
        """Return the configured category id for *url*; raise on unknown URLs."""
        for crawlUrl in self.category_urls:
            if url.startswith(crawlUrl):
                return self.category_urls[crawlUrl]["category_id"]
        log.msg("crawl error url:" + url, level=log.ERROR)
        raise Exception("crawl error url:" + url)

    def parse_item_category_name(self, url):
        """Return the configured category name for *url*; raise on unknown URLs."""
        for crawlUrl in self.category_urls:
            if url.startswith(crawlUrl):
                return self.category_urls[crawlUrl]["category_name"]
        log.msg("crawl error url:" + url, level=log.ERROR)
        raise Exception("crawl error url:" + url)



