#-*- coding:utf-8 -*-
import re
import json
import traceback
import urllib
import datetime

from scrapy import log
from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

from query_video_info.items import QueryVideoInfoItem


class SohuSpider(BaseSpider):
    """Search-driven spider for tv.sohu.com.

    Given a query string, walks the so.tv.sohu.com search result pages,
    follows every video link (both editorial tv.sohu.com and UGC
    my.tv.sohu.com pages), then chains three auxiliary requests per video
    to collect play count, up/down vote counts, and Changyan comments,
    finally emitting one QueryVideoInfoItem per video.

    Callback chain:
        start_requests -> parse_search -> parse (video page)
            -> parse_play_times -> parse_up_down -> parse_comment
            -> construct_item
    """

    name = "sohu"
    domain = "tv.sohu.com"
    url_prefix = "http://so.tv.sohu.com/"
    # Raw strings so regex escapes like \d are not fragile string escapes.
    # ('\-' was redundant inside a pattern; plain '-' matches the same.)
    release_time_p = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}")
    vid_p = re.compile(r'var vid.*?=.*?(\'|")(\d+?)(\'|");')
    cid_p = re.compile(r'var cid.*?=.*?(\'|")(\d*?)(\'|");')
    play_times_p = re.compile(r'count(:|=)(\d+?)(,|$)')

    def __init__(self, query, rdir, start_time, content_id):
        """Store the crawl parameters passed in from the crawler command line.

        :param query: search keywords (stripped before use)
        :param rdir: result directory (consumed elsewhere, e.g. pipelines)
        :param start_time: crawl window start (consumed elsewhere)
        :param content_id: external content identifier (consumed elsewhere)
        """
        # Fix: run BaseSpider's own initialisation, which the original
        # skipped entirely.
        super(SohuSpider, self).__init__()
        self.query = query.strip()
        self.rdir = rdir
        self.start_time = start_time
        self.content_id = content_id
        self.fetch_time = datetime.date.today()

    def start_requests(self):
        """Kick off the crawl with the first search result page."""
        url = self.url_prefix + "mts?wd=%s" % urllib.quote(self.query)
        yield Request(url, callback=self.parse_search)

    def parse_search(self, response):
        """Extract video-page links from a search result page and paginate.

        Editorial pages (tv.sohu.com) are tagged ugc=False, user-generated
        pages (my.tv.sohu.com) ugc=True; both fall through to self.parse
        (the default callback).
        """
        video_lx = SgmlLinkExtractor(allow=("http://tv\.sohu\.com/.*?\.shtml$"))
        video_links = video_lx.extract_links(response)
        for link in video_links:
            url = link.url
            yield Request(url, meta={"url": url, "ugc": False})

        video_lx = SgmlLinkExtractor(allow=("http://my\.tv\.sohu\.com/.*?\.shtml$"))
        video_links = video_lx.extract_links(response)
        for link in video_links:
            url = link.url
            yield Request(url, meta={"url": url, "ugc": True})

        # Follow the "next page" link only when its anchor text confirms
        # there really is a next page.
        sel = Selector(response)
        next_page_info_lst = sel.xpath("//a[@class='next']/text()").extract()
        if next_page_info_lst and next_page_info_lst[0] == u"下一页":
            next_page_url = self.url_prefix + sel.xpath("//a[@class='next']/@href").extract()[0]
            yield Request(next_page_url, callback=self.parse_search)

    def parse(self, response):
        """Scrape static metadata from a video page, then request play count.

        Pages without an embedded vid/cid are skipped with a warning — they
        are not video detail pages.
        """
        body = response.body_as_unicode()  # hoisted: decoded once, used twice
        try:
            # A failed .search() returns None; .group then raises
            # AttributeError — the only failure mode expected here.
            vid = self.vid_p.search(body).group(2)
            cid = self.cid_p.search(body).group(2)
        except AttributeError:
            log.msg("url:%s has no vid or cid" % response.url, level=log.WARNING)
            return

        meta = response.meta
        meta["vid"] = vid
        meta["cid"] = cid
        meta["url"] = response.url

        sel = Selector(response)
        # Several page layouts exist; the xpath unions below try each known
        # variant and join whatever matched.
        meta["title"] = u"".join(sel.xpath("//h1[@id='video-title']/@title "
                                           "|"
                                           " //div[@id='crumbsBar']//h2/text() "
                                           "|"
                                           " //h1/text()").extract()).strip()
        meta["username"] = u"".join(sel.xpath("//div[@class='user-info']//h3/a/text()").extract()).strip()

        origin_release_time = u"".join(sel.xpath("//div[@class='user-info']//div[@class='r user-info-b']/p[2]/text()"
                                                 "|"
                                                 "//div[@class='info info-con']/ul/li[@class='h'][1]/text()").extract()).strip()
        release_time_m = self.release_time_p.search(origin_release_time)
        meta["release_time"] = release_time_m.group().strip() if release_time_m is not None else u""

        origin_crumbs_lst = sel.xpath("//div[@class='nav']/a/text()"
                                      "|"
                                      "//div[@id='contentA']/div[@class='left']/div[@class='crumb']//text()"
                                      "|"
                                      "//div[@class='crumbs']//text()").extract()

        # If the page already supplies separator text nodes (">" or "/"),
        # keep the nodes as-is; otherwise insert ">" between crumb parts.
        meta["crumbs"] = u"".join(origin_crumbs_lst).strip() if ((u">" in origin_crumbs_lst) or (u"/" in origin_crumbs_lst)) else u">".join(origin_crumbs_lst).strip()

        meta["text_short"] = u""

        meta["description"] = u"".join(sel.xpath("//div[@id='info']//*[local-name() != 'script']//text()"
                                                 "|"
                                                 "//div[@id='playlist']//text()"
                                                 "|"
                                                 "//p[@class='videoDes cfix']/text()").extract()).strip()

        # UGC and editorial videos expose play counts on different services.
        if meta["ugc"]:
            play_count_url = "http://vstat.my.tv.sohu.com/dostat.do?method=getVideoPlayCount&v=%s" % vid
        else:
            play_count_url = ("http://count.vrs.sohu.com/count/stat.do?"
                              "videoId=%s&tvid=1173468&playlistId=6535203&"
                              "categoryId=8&catecode=107104&uid=13935550002195584483&"
                              "plat=flash&os=Linux&online=0&type=vrs&"
                              "r=http%%3A//tv.sohu.com/20140301/n395869190.shtml&t=1393643919923.6252" % vid)

        yield Request(play_count_url, meta=meta, callback=self.parse_play_times)

    def parse_play_times(self, response):
        """Parse the play-count response, then request up/down vote counts.

        play_times defaults to 0 whenever the count cannot be parsed as a
        plain digit string.
        """
        meta = response.meta
        play_times_m = self.play_times_p.search(response.body_as_unicode().strip())
        meta['play_times'] = play_times_m.group(2) if play_times_m is not None else u""
        meta['play_times'] = int(meta['play_times']) if meta['play_times'].isdigit() else 0
        score_url = "http://score.my.tv.sohu.com/digg/get.do?vid=%s&type=%s" % (meta['vid'], meta['cid'])
        yield Request(score_url, meta=meta, callback=self.parse_up_down)

    def parse_up_down(self, response):
        """Parse up/down vote counts, then request the comment feed.

        The score endpoint wraps its JSON in one extra character on each
        side (JSONP-style), hence the [1:-1] slice. Any parse failure
        falls back to zero votes — best effort, never aborts the chain.
        """
        meta = response.meta
        try:
            up_down_info = response.body_as_unicode().strip()[1:-1]
            up_down = json.loads(up_down_info)
            meta['up_times'] = up_down["upCount"]
            meta['down_times'] = up_down["downCount"]
        except Exception:
            log.msg("url:%s can't get up down info" % meta["url"], level=log.WARNING)
            meta['up_times'] = 0
            meta['down_times'] = 0

        comment_url = ("http://changyan.sohu.com/api/services/topic/load?"
                       "client_id=cyqyBluaj&outer_page_size=10&outer_page_no=1"
                       "&topic_url=%s"
                       "&style=terrace&refresh=true&order_by=time") % urllib.quote(meta["url"])

        yield Request(comment_url, meta=meta, callback=self.parse_comment)

    def parse_comment(self, response):
        """Parse Changyan comments and emit the finished item.

        Unparseable responses still emit an item with zero comments;
        individual malformed comments are logged and skipped.
        """
        meta = response.meta
        # Defaults so construct_item always has both keys, even when the
        # JSON below fails to parse. (The original re-assigned
        # comment_content a second time after the try/except — removed.)
        meta["comment_content"] = []
        meta["comment_num"] = 0
        try:
            comment_info = json.loads(response.body_as_unicode())
        except Exception:
            log.msg("url:%s can't get comment info" % meta["url"], level=log.WARNING)
            return self.construct_item(meta)

        meta['comment_num'] = comment_info['cmt_sum']

        for comment in comment_info['comments']:
            try:
                meta['comment_content'].append({'username': comment['passport']['nickname'].encode("utf-8", "ignore"),
                                                'comments_content': comment['content'].encode("utf-8", "ignore")})
            except Exception:
                log.msg(traceback.format_exc(), level=log.WARNING)

        return self.construct_item(meta)

    def construct_item(self, meta):
        """Copy the accumulated meta fields into a QueryVideoInfoItem.

        Raises Exception if any expected field is missing from meta, so a
        broken callback chain fails loudly instead of emitting partial
        items. Unicode values are encoded to UTF-8 bytes on the way out.
        """
        i = QueryVideoInfoItem()
        i_keys = set(["title", "username", "release_time", "play_times", "url",
                      "crumbs", "up_times", "down_times", "text_short", "description",
                      "comment_num", "comment_content", "ugc"])
        diff_keys = i_keys - set(meta.keys())
        if diff_keys:
            exception_info = "miss data of %s" % diff_keys
            raise Exception(exception_info)
        for k in i_keys:
            if type(meta[k]) == unicode:
                i[k] = meta[k].encode("utf-8", "ignore")
            else:
                i[k] = meta[k]
        return i
