# coding=utf8

import re
import urllib2

from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from flash4399.items import FlashItem, FlashCItem
from scrapy import log


# Module-level cache mapping url -> decoded HTML. A failed fetch is stored
# as False, which is falsy, so it will be retried on the next call.
get_page_cache = {}


def get_page(url):
    """Fetch *url* and return its HTML decoded from GBK, or False on error.

    Successful results are memoized in ``get_page_cache``. Pages on 4399.com
    are GBK-encoded; undecodable bytes are dropped.
    """
    cached = get_page_cache.get(url)
    if cached:
        return cached

    html = False
    try:
        f = urllib2.urlopen(urllib2.Request(url=url))
        try:
            html = unicode(f.read(), 'gbk', 'ignore')
        finally:
            # Always release the connection (the handle was leaked before).
            f.close()
    except urllib2.URLError:
        # URLError is the base of HTTPError, so this also covers the
        # network-level failures that previously escaped this helper.
        pass

    get_page_cache[url] = html

    return html


class Flash4399Spider(CrawlSpider):
    """Crawl www.4399.com and extract flash-game metadata.

    Two link rules drive the crawl:

    * ``/flash/<id>.htm`` detail pages   -> :meth:`parse_item` (one FlashItem)
    * ``/flash_fl/*.htm`` listing pages  -> :meth:`parse_category_items`
      (a list of FlashCItem)
    """
    name = 'flash4399'
    allowed_domains = ['4399.com']
    start_urls = ['http://www.4399.com/']

    rules = (
        Rule(SgmlLinkExtractor(allow=r'www\.4399\.com/flash/[0-9]+\.htm'), callback='parse_item', follow=True),
        Rule(SgmlLinkExtractor(allow=r'www\.4399\.com/flash_fl/.*\.htm'), callback='parse_category_items', follow=True),
    )

    def parse_item(self, response):
        """Parse one game detail page into a FlashItem.

        Returns None (logging the reason) whenever a required field cannot
        be extracted, so the pipeline only ever sees complete items.
        """
        hxs = HtmlXPathSelector(response)
        i = FlashItem()

        # The game id is the numeric component of the detail-page URL.
        m = re.match(r'.*/flash/([0-9]+)\.htm', response.url)
        if not m:
            log.msg('cannot find id: %s' % (response.url,))
            return
        i['id'] = m.group(1)

        try:
            i['name'] = hxs.select('//table[@height="22"]/tr/td/a[last()]/text()').extract()[0]
            category = hxs.select('//table[@height="22"]/tr/td/a[2]/text()').extract()[0]
        except IndexError:
            log.msg('cannot find name/category: %s' % (response.url,))
            return

        # Breadcrumb text is of the form u'XX小游戏'; strip the generic suffix.
        i['category'] = re.sub(u'小游戏', '', category)

        # The help/description text can live in several inline containers;
        # collect text from all of them and join with newlines.
        r = hxs.select('//div[@id="mb_game"]/table[2]/tr/td//p/text()|//div[@id="mb_game"]/table[2]/tr/td//font/text()|//div[@id="mb_game"]/table[2]/tr/td//div/text()|//div[@id="mb_game"]/table[2]/tr/td//span/text()').extract()
        i['help'] = '\n'.join(r)

        try:
            i['pic'] = hxs.select('//div[@class="GamePic"]//img/@src|//div[@class="GamePic2"]//img/@src').extract()[0]
        except IndexError:
            log.msg('cannot find image: %s' % (response.url,))
            return

        # The relative flash path is the first quoted token inside an inline
        # <script> block.
        try:
            i['flash'] = hxs.select('//td[@class="font14px"]/p/script/text()').extract()[0].split('"')[1]
        except IndexError:
            log.msg('cannot find flash: %s' % (response.url,))
            return
        if not i['flash']:
            log.msg('cannot find flash: %s' % (response.url,))
            return

        # The flash server prefix is defined in an external script whose src
        # ends in '.gif' (site obfuscation — presumably to dodge scrapers).
        server_js = False
        for ss in hxs.select('//head/script/@src').extract():
            if ss[-4:] == '.gif':
                server_js = ss
                break
        if not server_js:
            # Bug fix: previously fell through and concatenated False below.
            log.msg('cannot find server_js: %s' % (response.url,))
            return

        server_js_url = 'http://www.4399.com' + server_js
        servers = get_page(server_js_url)
        if not servers:
            # Bug fix: this log line referenced the undefined name 'url'.
            log.msg('cannot fetch server_js: %s' % (server_js_url,))
            return

        # The script body's first quoted token is the base URL of the flash
        # file server.
        base_flash_url = servers.split('"')[1]
        if base_flash_url[:7] != 'http://':
            log.msg('cannot parse server_js: %s' % (servers,))
            return

        i['flash'] = base_flash_url + i['flash']

        return i

    def parse_category_items(self, response):
        """Parse a category listing page into a list of FlashCItem.

        Each <li> in the games list yields one item with the numeric game id
        (taken from the link href) and its thumbnail URL.
        """
        hxs = HtmlXPathSelector(response)
        items = []
        for game in hxs.select('//div[@class="GamesList"]//li'):
            i = FlashCItem()
            # First run of digits in the href is the game id.
            i['id'] = game.select('a/@href').re(r'\d+')[0]
            i['pic_small'] = game.select('a/img/@src').extract()[0]
            items.append(i)

        return items


SPIDER = Flash4399Spider()
