#-*- coding:utf-8 -*-
import urllib
import re
import json
import datetime
from urllib import urlencode

from scrapy import log
from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.http import TextResponse
from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

from query_video_info.items import QueryVideoInfoItem


class YoukuSpider(BaseSpider):
    """Crawl Youku video search results (via soku.com) for one query string.

    Crawl pipeline:
      1. ``start_requests``   -- search soku.com for ``query``
      2. ``parse_search``     -- follow video links, paginate the result list
      3. ``parse``            -- scrape static metadata from the video page
      4. ``parse_play_times`` -- AJAX endpoint for the view count
      5. ``parse_comments``   -- paginated JSONP endpoint for comments
      6. ``construct_item``   -- assemble the final QueryVideoInfoItem
    """

    name = "youku"
    domain = "www.youku.com"
    url_prefix = "http://www.soku.com/search_video/q_"
    # Compiled once at class level; raw strings keep regex escapes literal.
    video_id_p = re.compile(r"videoId = '(.*?)';")  # inline JS on the video page
    show_id_p = re.compile(r'showid="(.*?)";')      # inline JS on the video page
    page_p = re.compile(r"'page':(.*?),")           # "next page" onclick payload

    def __init__(self, query, rdir, start_time, content_id):
        """
        :param query: search keyword; surrounding whitespace is stripped.
        :param rdir: result directory, stored for downstream consumers.
        :param start_time: crawl start time, stored for downstream consumers.
        :param content_id: id of the content this query belongs to.
        """
        # Fix: the original never called BaseSpider.__init__, so base-class
        # initialisation (e.g. default start_urls) was silently skipped.
        super(YoukuSpider, self).__init__()
        self.query = query.strip()
        self.rdir = rdir
        self.start_time = start_time
        self.content_id = content_id
        self.fetch_time = datetime.date.today()

    def start_requests(self):
        """Kick off the crawl with a soku.com search for ``self.query``."""
        # Fix: urllib.quote raises UnicodeEncodeError on non-ASCII unicode
        # (Chinese queries are the common case here); encode to UTF-8 first.
        # Plain byte strings pass through unchanged.
        query = self.query
        if isinstance(query, unicode):
            query = query.encode("utf-8")
        url = self.url_prefix + urllib.quote(query)
        yield Request(url, callback=self.parse_search)

    def parse_search(self, response):
        """Extract video links from a search-result page and paginate.

        Links in the "express" section are flagged ugc=False, links in the
        plain result section ugc=True; both are handled by ``parse``
        (the default callback).
        """
        express_video_lx = SgmlLinkExtractor(
            allow=("http://v\.youku\.com/v_show/id_.*?\.html$"),
            restrict_xpaths=("//div[@class='sk-express']"))
        for link in express_video_lx.extract_links(response):
            yield Request(link.url, meta={"url": link.url, "ugc": False})

        result_video_lx = SgmlLinkExtractor(
            allow=("http://v\.youku\.com/v_show/id_.*?\.html$"),
            restrict_xpaths=("//div[@class='sk-result']"))
        for link in result_video_lx.extract_links(response):
            yield Request(link.url, meta={"url": link.url, "ugc": True})

        sel = Selector(response)

        # Pagination: follow the "next" link while its title reads
        # u"下一页" ("next page").
        next_page_info_lst = sel.xpath("//li[@class='next']/a/@title").extract()
        if next_page_info_lst and next_page_info_lst[0] == u"下一页":
            next_page_url = self.url_prefix + sel.xpath("//li[@class='next']/a/@href").extract()[0]
            yield Request(next_page_url, callback=self.parse_search)

    def parse(self, response):
        """Scrape static metadata off a video page, then chain to the
        view-count AJAX request.  Pages missing a show/video id are skipped
        with a warning.
        """
        meta = response.meta
        meta["url"] = response.url  # may differ from the request url after redirects
        try:
            show_id = self.show_id_p.search(response.body).group(1)
            video_id = self.video_id_p.search(response.body).group(1)
        except AttributeError:
            # Fix: narrowed from ``except Exception`` -- only a failed regex
            # search (.search() returning None) is expected here.
            log.msg("url:%s has no show_id or video_id" % response.url, level=log.WARNING)
            return

        sel = Selector(response)

        meta["title"] = u"".join(sel.xpath("//h1[@class='title']//text()").extract()).strip()
        meta["username"] = u"".join(sel.xpath("//div[@class='userInfo']/div[@class='bar']/a/text()").extract()).strip()
        meta["crumbs"] = u">".join(sel.xpath("//div[@class='crumbs']/a/text()").extract()).strip()
        meta["text_short"] = u"".join(sel.xpath("//div[@id='text_short']//text()").extract())
        meta["description"] = u"".join(sel.xpath("//div[@class='showInfo']//text()").extract()).strip()

        meta["release_time"] = u"".join(sel.xpath("//div[@class='commentcon']/div[@class='con']/div[@class='panel']/span[@class='timestamp']/text()").extract()).strip()

        meta["up_times"] = self._vote_count(sel, "upVideoTimes")
        meta["down_times"] = self._vote_count(sel, "downVideoTimes")

        meta["show_id"] = show_id
        meta["video_id"] = video_id

        url = "http://v.youku.com/QVideo/~ajax/getVideoPlayInfo?id=%s&type=vv&catid=85" % video_id
        yield Request(url, meta=meta, callback=self.parse_play_times)

    @staticmethod
    def _vote_count(sel, span_id):
        """Parse a thumbs-up/down counter span; non-numeric text -> 0."""
        text = u"".join(sel.xpath("//span[@id='%s']/text()" % span_id).extract())
        text = text.strip().replace(u",", u"")
        return int(text) if text.isdigit() else 0

    @staticmethod
    def _comments_url(video_id, page, show_id):
        """Build the URL of the JSONP comments endpoint for ``page``."""
        params = {
            "__ap": '{"videoid":"%s","page":%s,"showid":"%s"}' % (video_id, page, show_id),
            "__callback": 'displayComments'
        }
        return "http://comments.youku.com/comments/~ajax/vpcommentContent.html?%s" % urlencode(params)

    def parse_play_times(self, response):
        """Record the view count, then chain to the first comments page."""
        meta = response.meta
        try:
            meta["play_times"] = json.loads(response.body_as_unicode())["vv"]
        except (ValueError, KeyError):
            # Fix: narrowed from ``except Exception``; malformed JSON or a
            # missing "vv" key is treated as zero plays (best effort).
            meta["play_times"] = 0

        meta["pn"] = u"1"
        meta["comment_content"] = []  # accumulated across all comment pages
        url = self._comments_url(meta["video_id"], 1, meta["show_id"])
        yield Request(url, meta=meta, callback=self.parse_comments)

    def parse_comments(self, response):
        """Collect one page of comments; follow up to 10 pages in total,
        then emit the finished item.
        """
        meta = response.meta
        try:
            # Body is JSONP: 'displayComments(' + json + ')'; strip the
            # 16-char callback prefix and the trailing ')'.
            data = json.loads(response.body[16:-1])
            data["totalSize"] = data["totalSize"].replace(",", "")
        except Exception:
            # Deliberate best-effort: any unparsable payload finishes the
            # item with whatever was gathered so far.
            log.msg("url:%s can't get comment info" % meta["url"], level=log.WARNING)
            meta["comment_num"] = 0
            return self.construct_item(meta)
        meta["comment_num"] = int(data["totalSize"]) if data["totalSize"].isdigit() else 0

        # Re-wrap the embedded HTML fragment so Selector can parse it.
        html_response = TextResponse(url=response.url, status=200,
                                     body=data["con"].encode("utf-8", "ignore"),
                                     encoding="utf-8")
        sel = Selector(html_response)
        for sub_sel in sel.xpath("//div[@class='comment']"):
            # NOTE: the double space in class='bar  ' matches the site's markup.
            user_name = u"".join(sub_sel.xpath("div[@class='commentcon']/div[@class='bar  ']/a/text()").extract()).strip().encode("utf-8", "ignore")
            comment_content = u"".join(sub_sel.xpath("div[@class='commentcon']/div[@class='con']/div[@class='text']/p//text()").extract()).strip().encode("utf-8", "ignore")
            meta["comment_content"].append({"username": user_name, "comments_content": comment_content})

        next_params = u"".join(sel.xpath("//li[@class='next'][1]/a/@onclick").extract()).strip()
        if not next_params:
            return self.construct_item(meta)

        try:
            page = self.page_p.search(next_params).group(1)
        except AttributeError:
            # Unparsable "next" payload: pretend we hit the page cap.
            page = u"11"

        if page == u"11":
            # Hard cap: never fetch beyond 10 comment pages.
            return self.construct_item(meta)
        url = self._comments_url(meta["video_id"], page, meta["show_id"])
        return Request(url, meta=meta, callback=self.parse_comments)

    def construct_item(self, meta):
        """Validate that every expected field is present in ``meta`` and
        copy it into a QueryVideoInfoItem, encoding unicode to UTF-8 bytes.

        :raises Exception: if any expected key is missing from ``meta``.
        """
        i = QueryVideoInfoItem()
        i_keys = set(["title", "username", "release_time", "play_times", "url",
                      "crumbs", "up_times", "down_times", "text_short", "description",
                      "comment_num", "comment_content", "ugc"])
        diff_keys = i_keys - set(meta.keys())
        if diff_keys:
            raise Exception("miss data of %s" % diff_keys)
        for k in i_keys:
            value = meta[k]
            # Fix: isinstance (not ``type() ==``) also covers unicode subclasses.
            i[k] = value.encode("utf-8", "ignore") if isinstance(value, unicode) else value
        return i


