# -*- coding: utf-8 -*-
import scrapy
import  re
import json
from lxml import etree
from video.items import VideoItem
from urllib import request
import base64


class Hao123Spider(scrapy.Spider):
    """Crawl movie / TV-play / variety-show / comic metadata from v.hao123.baidu.com.

    ``start_requests`` seeds one paginated search listing per channel; ``parse``
    follows each result to a detail page whose URL path (dianshi / dianying /
    zongyi / dongman) selects the channel-specific ``*detail`` callback.  Each
    callback yields one populated ``VideoItem``; pages that fail to parse have
    their URL appended to ``foo.txt`` for later inspection.
    """

    name = 'hao123'
    allowed_domains = ['v.hao123.baidu.com']
    start_urls = []

    # Running count of successfully scraped items.  Defined at class level so a
    # detail callback can never hit an AttributeError before start_requests runs.
    num = 0

    # Chinese row labels on detail pages mapped to VideoItem fields, in the
    # same precedence order as the original if/elif chains.
    _LABEL_FIELDS = (
        ("导演", "director"),
        ("国家/地区", "area"),
        ("年代", "decade"),
        ("类型", "category"),
        ("主演", "Starring"),
    )

    # ------------------------------------------------------------------ #
    # helpers
    # ------------------------------------------------------------------ #
    @staticmethod
    def _clean(parts, sep=""):
        """Join extracted xpath fragments with *sep* and strip quote chars."""
        return sep.join(parts).replace('"', '').replace("'", '')

    @staticmethod
    def _b64(text):
        """Return the base64 encoding (bytes) of *text* as UTF-8."""
        return base64.b64encode(text.encode('utf-8'))

    @staticmethod
    def _log_failure(url):
        """Append *url* to foo.txt so failed pages can be re-crawled later."""
        with open("foo.txt", "a+") as fo:
            fo.write(url + "\n")

    @staticmethod
    def _new_item(video_type):
        """Return a VideoItem of *video_type* with every field blanked."""
        tv = VideoItem()
        tv["type"] = video_type
        for field in ("img", "videoname", "otherdata", "director", "area",
                      "decade", "category", "Starring", "Introduction",
                      "urldata"):
            tv[field] = ""
        return tv

    def _fill_label_fields(self, tv, nodes, label_xpath, fields=None):
        """Populate ``tv`` from labelled detail rows.

        *label_xpath* extracts each row's label text relative to the row node;
        the first ``_LABEL_FIELDS`` entry whose label occurs in it wins
        (mirrors the original if/elif chains).  *fields*, when given,
        restricts which item fields may be written.
        """
        for node in nodes:
            label = "".join(node.xpath(label_xpath).extract())
            for key, field in self._LABEL_FIELDS:
                if fields is not None and field not in fields:
                    continue
                if key in label:
                    tv[field] = self._clean(node.xpath("a/text()").extract(), " ")
                    break

    def _introduction(self, response, primary, fallback):
        """Base64-encoded synopsis: try *primary* xpath, else *fallback*."""
        text = " ".join(response.xpath(primary).extract())
        if not text:
            text = " ".join(response.xpath(fallback).extract())
        return self._b64(text)

    @staticmethod
    def _episode_dict(obj, mapping):
        """Build an episode dict: each (out_key, src_key) pulls obj[src_key],
        defaulting to '' when the key (or the object itself) is missing."""
        if not isinstance(obj, dict):
            obj = {}
        return {out: obj.get(src, "") for out, src in mapping}

    # ------------------------------------------------------------------ #
    # crawling
    # ------------------------------------------------------------------ #
    def start_requests(self):
        """Seed one paginated search listing per channel."""
        # Reset the counter exactly once: the original reset it per channel,
        # but callbacks from all channels interleave and share this counter.
        self.num = 0
        for channel in ("tvplay", "movie", "tvshow", "comic"):
            url = "http://v.hao123.baidu.com/v/search?channel=" + channel + "&pn=1"
            yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        """Dispatch each search result to its channel callback; follow paging."""
        handlers = (
            ("dianshi", self.tvplaydetail),
            ("dianying", self.moviedetail),
            ("zongyi", self.tvshowdetail),
            ("dongman", self.dongmandetail),
        )
        links = response.xpath(
            '//div[contains(@class, "result clearfix") ]/ul/li/a/@href').extract()
        for url in links:
            # Deliberately no break: a URL matching several tokens yields
            # several requests, exactly like the original independent ifs.
            for token, callback in handlers:
                if token in url:
                    yield scrapy.Request(response.urljoin(url), callback=callback)
        nexturl = response.xpath('//a[contains(@class, "next-btn") ]/@href').extract()
        if len(nexturl) == 1:
            yield scrapy.Request(response.urljoin(nexturl[0]), callback=self.parse)

    def dongmandetail(self, response):
        """Parse a comic ("dongman") detail page and yield a VideoItem."""
        try:
            tv = self._new_item("4")
            tv["img"] = self._clean(
                response.xpath('//*[@id="surface"]/img/@src').extract())
            if tv["img"] == "":
                print("\t\t无法获取图片信息")
            tv["videoname"] = self._clean(
                response.xpath('//div[contains(@class, "area1 clearfix") ]/h1/text()').extract())
            if tv["videoname"] == "":
                print("\t\t没有名字信息")
            tv["otherdata"] = self._clean(
                response.xpath('//em[contains(@class, "updateto") ]/text()').extract())
            self._fill_label_fields(
                tv,
                response.xpath('//div[contains(@class, "area2 clearfix") ]/p'),
                "text()")
            tv["Introduction"] = self._introduction(
                response,
                "//p[contains(@class, 'abstract') ]/a/@data-content",
                "//p[contains(@class, 'abstract') ]/span/text()")

            sites = re.findall(r'data-site="(.*?)"', response.text)
            vid = re.search(r"v.hao123.baidu.com/dongman/(.*?).htm",
                            response.url).group(1)
            dataall = {}
            for site in sites:
                # Episode lists come from a quickling AJAX endpoint returning
                # HTML wrapped in JSON.
                # NOTE(review): urlopen has no timeout and blocks the Scrapy
                # reactor; consider converting to scrapy.Request.
                dataurl = ("http://v.hao123.baidu.com/dongman/" + vid
                           + ".htm?site=" + site
                           + "&v=1532497610196&__quickling__=episode.0&t=235600")
                payload = json.loads(request.urlopen(dataurl).read().decode('utf-8'))
                selector = etree.HTML(payload[0]["html"]["html"])
                numbers = selector.xpath('//a/text()')
                hrefs = selector.xpath('//a/@href')
                episodes = []
                for idx, href in enumerate(hrefs):
                    episodes.append({
                        # numbers may be shorter than hrefs; default to "".
                        "number": str(numbers[idx]) if idx < len(numbers) else "",
                        "defaultUrl": href,
                        "v_cover": "",
                        "videoid": "",
                        "title": "",
                    })
                dataall[site] = episodes
            tv["urldata"] = self._b64(json.dumps(dataall))
            yield tv
            self.num += 1
            print("\t爬取：第" + str(self.num) + "个视频成功" + response.url)
        except Exception:
            # Layout change or upstream failure: record the URL and move on.
            self._log_failure(response.url)

    def tvshowdetail(self, response):
        """Parse a variety-show ("zongyi") detail page and yield a VideoItem."""
        try:
            tv = self._new_item("3")
            tv["img"] = self._clean(
                response.xpath('//div[contains(@class, "poster") ]/a/img/@src').extract())
            if tv["img"] == "":
                print("\t\t无法获取图片信息")
            tv["videoname"] = self._clean(
                response.xpath('//div[contains(@class, "info") ]/h1/text()').extract())
            if tv["videoname"] == "":
                print("\t\t没有名字信息")
            tv["otherdata"] = self._clean(
                response.xpath('//div[contains(@class, "info") ]/h1/span/text()').extract())
            # Hosts/guests live in dedicated spans rather than labelled rows.
            tv["director"] = self._clean(
                response.xpath('//span[contains(@class, "host") ]/span/a/text()').extract(), " ")
            tv["Starring"] = self._clean(
                response.xpath('//span[contains(@class, "guest") ]/span/a/text()').extract(), " ")
            self._fill_label_fields(
                tv,
                response.xpath('//div[contains(@class, "info") ]/p/span'),
                "span/text()",
                fields=("area", "decade", "category"))
            tv["Introduction"] = self._introduction(
                response,
                "//p[contains(@class, 'abstract') ]/input/@value",
                "//p[contains(@class, 'abstract') ]/em/text()")

            vid = re.search(r"v.hao123.baidu.com/zongyi/(.*?).htm",
                            response.url).group(1)
            sources = response.xpath(
                '// *[ @ id = "source"] / div / div / a/@data-source').extract()
            if not sources:
                sources = response.xpath('//*[@id="source"]/span/em/@data-source').extract()
            # API field "period" maps to our "number"; the rest are 1:1.
            mapping = (("number", "period"), ("defaultUrl", "defaultUrl"),
                       ("v_cover", "v_cover"), ("videoid", "videoid"),
                       ("title", "title"))
            dataall = {}
            for source in sources:
                data = {}
                # A first call (any year) reveals which years have episodes.
                listing = ("http://v.hao123.baidu.com/vapi/getAllTvShow?year=2018"
                           + "&id=" + str(vid) + "&resource=" + source)
                years = json.loads(
                    request.urlopen(listing).read().decode('utf-8'))["data"]["yearAll"]
                for year in years:
                    yearurl = ("http://v.hao123.baidu.com/vapi/getAllTvShow?year="
                               + str(year) + "&id=" + vid + "&resource=" + source)
                    objs = json.loads(
                        request.urlopen(yearurl).read().decode('utf-8'))["data"]["objs"]
                    data[year] = [self._episode_dict(obj, mapping) for obj in objs]
                dataall[source] = data
            tv["urldata"] = self._b64(json.dumps(dataall))
            self.num += 1
            print("\t爬取：第" + str(self.num) + "个视频成功" + response.url)
            yield tv
        except Exception:
            self._log_failure(response.url)

    def moviedetail(self, response):
        """Parse a movie ("dianying") detail page and yield a VideoItem."""
        try:
            tv = self._new_item("2")
            # Quote-stripped for consistency with every other field (the
            # original left movie posters raw, unlike all other channels).
            tv["img"] = self._clean(
                response.xpath('//div[contains(@class, "poster") ]/a/img/@src').extract())
            if tv["img"] == "":
                print("\t\t无法获取图片信息")
            tv["videoname"] = self._clean(
                response.xpath('//div[contains(@class, "items clearfix") ]/h1/text()').extract())
            if tv["videoname"] == "":
                print("\t\t没有名字信息")
            tv["otherdata"] = self._clean(
                response.xpath('//div[contains(@class, "items clearfix") ]/span/text()').extract())
            self._fill_label_fields(
                tv,
                response.xpath('//div[contains(@class, "info") ]/p/span'),
                "em/text()")
            tv["Introduction"] = self._introduction(
                response,
                "//p[contains(@class, 'abstract') ]/input/@value",
                "//p[contains(@class, 'abstract') ]/em/text()")

            dataall = []
            for link in response.xpath('//div[contains(@class, "list-container") ]/ul/li/a'):
                dataall.append({
                    "site": "".join(link.xpath("@title").extract()),
                    "defaultUrl": "".join(link.xpath("@href").extract()),
                })
            # The primary play button is appended as one extra source entry.
            dataall.append({
                "site": " ".join(
                    response.xpath("//a[contains(@class, 'play-btn') ]/@alog-text").extract()),
                "defaultUrl": " ".join(
                    response.xpath("//a[contains(@class, 'play-btn') ]/@href").extract()),
            })
            tv["urldata"] = self._b64(json.dumps(dataall))
            yield tv
            self.num += 1
            print("\t爬取：第" + str(self.num) + "个视频成功" + response.url)
        except Exception:
            self._log_failure(response.url)

    def tvplaydetail(self, response):
        """Parse a TV-play ("dianshi") detail page and yield a VideoItem.

        The per-site episode table is embedded in the page as a javascript
        object literal (``episode: '...}',``), not as HTML.
        """
        try:
            match = re.search(r"episode: \'(.*?)\}\',", response.text)
            if match is None:
                # No embedded episode JSON: record and bail out early instead
                # of crashing on match.group(1) after all the field work.
                self._log_failure(response.url)
                return
            tv = self._new_item("1")
            self.num += 1
            print("\t成功获取json数据：第" + str(self.num) + "个：" + response.url)
            tv["img"] = self._clean(
                response.xpath('//*[@id="topicPage"]/img/@src').extract())
            if tv["img"] == "":
                print("\t\t无法获取图片信息")
            tv["videoname"] = self._clean(
                response.xpath('//div[contains(@class, "info") ]/h1/text()').extract())
            if tv["videoname"] == "":
                print("\t\t没有名字信息")
            tv["otherdata"] = self._clean(
                response.xpath('//div[contains(@class, "info") ]/h1/span/em/text()').extract())
            self._fill_label_fields(
                tv,
                response.xpath('//div[contains(@class, "info") ]/p/span'),
                "em/text()")
            tv["Introduction"] = self._introduction(
                response,
                "//p[contains(@class, 'abstract') ]/input/@value",
                "//p[contains(@class, 'abstract') ]/em/text()")

            # The capture drops the closing brace; restore it and unescape
            # the javascript-escaped slashes before parsing.
            blob = match.group(1).replace("\\/", "/") + "}"
            try:
                js = json.loads(blob)
                print("\tjson解析成功")
            except ValueError:
                # The original printed this and then crashed on the undefined
                # variable `js`; fail cleanly instead.
                print("\tjson 无法获取解析")
                self._log_failure(response.url)
                return
            mapping = (("number", "number"), ("defaultUrl", "defaultUrl"),
                       ("v_cover", "v_cover"), ("videoid", "videoid"))
            dataall = {}
            for website, episodes in js.items():
                dataall[website] = [self._episode_dict(ep, mapping) for ep in episodes]
            tv["urldata"] = self._b64(json.dumps(dataall))
            yield tv
        except Exception:
            self._log_failure(response.url)
