#-*- coding:utf-8 -*-
import urlparse
import json
import traceback
import urllib
import datetime

from scrapy import log
from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

from query_video_info.items import QueryVideoInfoItem


class HunantvSpider(BaseSpider):
    """Crawl so.hunantv.com search results for ``query`` and emit one
    QueryVideoInfoItem per video found.

    Pipeline: parse_search (paginated search pages) -> parse (video page,
    extracts aid/title) -> parse_count_data (play/up/down counters) ->
    parse_comment (comment feed) -> construct_item.
    """

    name = "hunantv"
    domain = "www.hunantv.com"

    # Every finished item must carry exactly these keys.
    ITEM_KEYS = frozenset(["title", "username", "release_time", "play_times",
                           "url", "crumbs", "up_times", "down_times",
                           "text_short", "description", "comment_num",
                           "comment_content", "ugc"])

    def __init__(self, query, rdir, start_time, content_id):
        """Store crawl parameters.

        query      -- search keyword (surrounding whitespace stripped)
        rdir       -- result directory (stored only; not read in this class)
        start_time -- crawl start time (stored only; not read in this class)
        content_id -- content identifier (stored only; not read in this class)
        """
        # Fix: the original skipped base-class initialization entirely.
        super(HunantvSpider, self).__init__()
        self.query = query.strip()
        self.rdir = rdir
        self.start_time = start_time
        self.content_id = content_id
        self.fetch_time = datetime.date.today()

    def start_requests(self):
        """Kick off the crawl with a single search-page request."""
        url = "http://so.hunantv.com/so/k-%s" % urllib.quote(self.query)
        yield Request(url, callback=self.parse_search)

    def parse_search(self, response):
        """Yield one request per video link on a search page and follow
        the pagination link, if any."""
        # Raw string in an explicit one-element tuple: the original passed a
        # parenthesized plain string, which the extractor accepts but which
        # reads misleadingly like a tuple.
        video_lx = SgmlLinkExtractor(
            allow=(r"http://www\.hunantv\.com/v/\d/\d+/./\d+\.html$",))
        for link in video_lx.extract_links(response):
            url = link.url
            # No callback given: Scrapy routes the response to self.parse.
            yield Request(url, meta={"url": url, "ugc": False})

        sel = Selector(response)
        next_page_relative_url = "".join(
            sel.xpath(u"//a[@title='下一页']/@href").extract()).strip()
        # Empty string is falsy; no need for the original "or None" +
        # "is not None" round trip.
        if next_page_relative_url:
            next_page_url = urlparse.urljoin(response.url,
                                             next_page_relative_url)
            yield Request(next_page_url, callback=self.parse_search)

    def parse(self, response):
        """Extract the video id and title from a video page, then chain to
        the counter endpoint."""
        meta = response.meta
        # aid is the numeric basename of the URL: .../<aid>.html
        # String slicing cannot raise, so the original try/except (whose
        # handler was unreachable) is replaced by the isdigit() check alone.
        meta["aid"] = response.url.split("/")[-1][:-5].strip()
        if not meta["aid"].isdigit():
            log.msg("url:%s get aid error" % response.url, level=log.WARNING)
            return
        meta["url"] = response.url
        sel = Selector(response)
        meta["title"] = u"".join(
            sel.xpath("//div[@class='play-index-til']/text()").extract())
        # Fields this site does not provide; kept so construct_item always
        # sees a complete key set.
        meta["username"] = u""
        meta["release_time"] = u""
        meta["crumbs"] = u""
        meta["text_short"] = u""
        meta["description"] = u""
        count_data_url = ("http://click.hifly.tv/get.php?aid=%s&type=videos"
                         % meta["aid"])
        yield Request(count_data_url, meta=meta, callback=self.parse_count_data)

    def parse_count_data(self, response):
        """Parse the JSON counter payload (up/down/play counts), falling
        back to zeros on any malformed response, then chain to comments."""
        meta = response.meta
        try:
            count_data = json.loads(response.body_as_unicode().strip())
            meta['up_times'] = count_data["data"]["up"]
            meta['down_times'] = count_data["data"]["down"]
            # "click" arrives as a comma-grouped string, e.g. "1,234".
            meta["play_times"] = int(count_data["data"]["click"].replace(",", ""))
        except (ValueError, KeyError, TypeError, AttributeError):
            # Narrowed from a blanket Exception: these are the failures a
            # bad or unexpectedly-shaped JSON body can actually produce.
            log.msg("url:%s can't get count data info" % meta["url"],
                    level=log.WARNING)
            meta['up_times'] = 0
            meta['down_times'] = 0
            meta["play_times"] = 0
        comment_url = ("http://comment.hunantv.com/video_comment/list/?"
                       "type=hunantv2014&subject_id=%s&page=1") % meta["aid"]
        yield Request(comment_url, meta=meta, callback=self.parse_comment)

    def parse_comment(self, response):
        """Parse the comment feed and build the final item.

        Always returns a complete item: defaults are set up front so a
        bad comment response degrades to an item with no comments.
        """
        meta = response.meta
        meta["comment_content"] = []
        meta["comment_num"] = 0
        try:
            comment_info = json.loads(response.body_as_unicode())
        except ValueError:
            log.msg("url:%s can't get comment info" % meta["url"],
                    level=log.WARNING)
            return self.construct_item(meta)

        # Fix: .get() keeps a response with missing keys from raising
        # KeyError and losing the item (defaults above still apply).
        meta['comment_num'] = comment_info.get('total_number', 0)

        for comment in comment_info.get('comments', []):
            try:
                meta['comment_content'].append(
                    {'username': comment['user']['nickname'].encode("utf-8", "ignore"),
                     'comments_content': comment['content'].encode("utf-8", "ignore")})
            except Exception:
                # Skip a single malformed comment but keep the rest.
                log.msg(traceback.format_exc(), level=log.WARNING)

        return self.construct_item(meta)

    def construct_item(self, meta):
        """Copy the collected fields out of ``meta`` into an item.

        Raises Exception when any expected key is missing, so incomplete
        items never reach the pipeline.
        """
        missing = set(self.ITEM_KEYS) - set(meta.keys())
        if missing:
            raise Exception("miss data of %s" % missing)
        i = QueryVideoInfoItem()
        for k in self.ITEM_KEYS:
            # isinstance instead of type(...) == unicode: also covers
            # unicode subclasses and is the idiomatic type test.
            if isinstance(meta[k], unicode):
                i[k] = meta[k].encode("utf-8", "ignore")
            else:
                i[k] = meta[k]
        return i
