#! /usr/bin/env python
# -*- coding: utf8 -*-
#
#@author www
#@date 2014-06-21
#
#crawl http://www.ctb520.com/nhan/ and http://www.ctb520.com/gaoxiao/
#
#

from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from gaodeha_crawl.common.db_utils import DbUtils 
from gaodeha_crawl.common.constant import *
from gaodeha_crawl.items import PostItem
from scrapy import log
import time
import datetime
import re
import random

class CtbSpider(CrawlSpider):
    """Crawl image posts from www.ctb520.com and emit one PostItem per page.

    Two sections are crawled: /nhan/ (implication pictures) and /gaoxiao/
    (funny pictures). List pages are followed; detail pages are parsed by
    :meth:`parse_item`. On startup the configured categories are inserted
    into the ``tb_category`` table if they are not already present.
    """

    name = 'ctb'
    allowed_domains = ['ctb520.com']
    start_urls = ["http://www.ctb520.com/gaoxiao/", "http://www.ctb520.com/nhan/"]
    # Max brief length before truncation (truncation currently disabled,
    # see parse_item_brief).
    max_brief_index = 512
    # Maps each crawled section URL prefix to its local category metadata.
    # "author" is a pool of pen names; one is picked at random per post.
    category_urls = {
                "http://www.ctb520.com/nhan/" : {
                        "category_id": "107000",
                        "category_name": "寸土内涵图",
                        "author": ["撸啊撸", "小内裤", "大裤衩"],
                    },
                "http://www.ctb520.com/gaoxiao/" : {
                        "category_id": "107001",
                        "category_name": "寸土搞笑图",
                        "author": ["撸啊撸", "小内裤", "大裤衩"],
                    }
            }
    # Raw strings so regex escapes (\d, \.) are not interpreted by Python.
    rules = (
             Rule(SgmlLinkExtractor(allow=r'/nhan/list.*\.html', allow_domains=['ctb520.com']), follow=True),
             Rule(SgmlLinkExtractor(allow=r'/nhan/[\d/\.]*\.html', allow_domains=['ctb520.com']), callback='parse_item'),
             Rule(SgmlLinkExtractor(allow=r'/gaoxiao/list.*\.html', allow_domains=['ctb520.com']), follow=True),
             Rule(SgmlLinkExtractor(allow=r'/gaoxiao/[\d/\.]*\.html', allow_domains=['ctb520.com']), callback='parse_item'),
            )


    def __init__(self, *args, **kwargs):
        """Ensure every configured category exists in tb_category.

        Accepts and forwards any CrawlSpider constructor arguments so the
        spider stays compatible with Scrapy's instantiation contract
        (e.g. spider arguments passed on the command line).
        """
        CrawlSpider.__init__(self, *args, **kwargs)
        self.db = DbUtils()
        sql = "select category_id, category_name from tb_category where category_show_status=1 and category_id=%s"
        for category_value in self.category_urls.values():
            category = self.db.fetchOneDict(sql, (category_value["category_id"],))
            if not category:
                # Category missing from the DB: insert it so posts can reference it.
                tsql = "insert into tb_category (category_id, category_name) values (%s, %s)"
                self.db.executeSql(tsql, (category_value["category_id"], category_value["category_name"]))


    def parse_item(self, response):
        """Parse one post detail page into a PostItem.

        Returns None (the page is dropped) when no title can be extracted,
        instead of crashing with an IndexError on an unexpected layout.
        """
        log.msg("crawl parse url:" + response.url, level=log.DEBUG)
        selector = Selector(response)
        post_title = selector.xpath('//div[@class="leftcont"]/dl[@class="listitem"]//h3/text()').extract()
        if not post_title:
            # Defensive: layout changed or empty page -- skip this item.
            log.msg("crawl error url (no title):" + response.url, level=log.ERROR)
            return None
        now = int(time.time())
        item = PostItem()
        item['post_title'] = post_title[0]
        item['post_content'] = self.parse_item_content(selector)
        item['post_type'] = POST_TYPE_IMAGE
        item['post_author'] = self.parse_item_author(response.url)
        item['post_status'] = POST_STATUS_CHECK
        item['post_brief'] = ""
        item['comment_count'] = 0
        item['praise_count'] = 0
        item['hate_count'] = 0
        item['collect_count'] = 0
        item['share_count'] = 0
        item['post_category_id'] = self.parse_item_category_id(response.url)
        item['post_category_name'] = self.parse_item_category_name(response.url)
        item['post_origin_url'] = response.url
        item['post_origin_sitename'] = '寸土吧'
        # The detail URL doubles as a stable deduplication key.
        item['post_uuid'] = response.url
        item['post_insert_time'] = now
        item['post_show_time'] = self.parse_item_time(selector)
        item['post_update_time'] = now
        item['post_font_images'] = ''
        item['post_vedio_url'] = ''
        item['post_tags'] = ''
        item['top_order'] = 100
        return item

    def parse_item_content(self, selector):
        """Return the first content <img> tag as raw HTML, or '' if none."""
        imgs = selector.xpath('//div[@class="leftcont"]/dl[@class="listitem"]//div[@class="cont"]//img').extract()
        if not imgs:
            return ""
        return imgs[0]

    def parse_item_brief(self, content):
        """Return a stripped brief for *content* ('' for empty input).

        NOTE(review): truncation to max_brief_index was disabled and this
        helper is not called anywhere in this file.
        """
        if not content:
            return ''
        return content.strip()


    def parse_item_time(self, selector):
        """Return the post's publication time as a unix timestamp.

        Falls back to the current time when the year/month ("ym") or day
        ("d") spans are missing. Assumes the "ym" span is formatted as
        YYYY/MM -- TODO confirm against the live page.
        """
        month = selector.xpath('//div[@class="time"]/span[@class="ym"]/text()').extract()
        day = selector.xpath('//div[@class="time"]/span[@class="d"]/text()').extract()
        post_show_time = int(time.time())
        if month and day:
            time_str = month[0] + "/" + day[0]
            post_show_time = long(time.mktime(datetime.datetime.strptime(time_str, '%Y/%m/%d').timetuple()))
        return post_show_time

    def _find_category(self, url):
        """Return the category_urls entry whose URL prefixes *url*, or None."""
        for crawl_url, category in self.category_urls.items():
            if url.startswith(crawl_url):
                return category
        return None

    def parse_item_author(self, url):
        """Pick a random pen name for the post's category; generic fallback
        author when the URL matches no configured category."""
        category = self._find_category(url)
        if category:
            return random.choice(category["author"])
        return "段子手"

    def parse_item_category_id(self, url):
        """Return the category id for *url*; raise for an unknown URL."""
        category = self._find_category(url)
        if category:
            return category["category_id"]
        log.msg("crawl error url:" + url, level=log.ERROR)
        raise Exception("crawl error url:" + url)

    def parse_item_category_name(self, url):
        """Return the category name for *url*; raise for an unknown URL."""
        category = self._find_category(url)
        if category:
            return category["category_name"]
        log.msg("crawl error url:" + url, level=log.ERROR)
        raise Exception("crawl error url:" + url)



