from scrapy_redis.spiders import RedisSpider
import scrapy
import re
import json
import MySQLdb
import requests

class AnimeSpider(RedisSpider):
    """Redis-fed spider for www.vultr1.com.

    Crawl flow:
      parse        -> top navigation menu, one request per category
      parse_item   -> category listing pages (follows pagination)
      parse_detail -> video detail page, yields a "video" item and one
                      request per episode link
      parse_play   -> episode play page, yields a "play" item

    ``playerconfig`` is a one-off helper (disabled in ``__init__``) that
    syncs the site's player list into the ``source`` table.
    """

    domain = 'http://www.vultr1.com'

    name = 'Anime'
    allowed_domains = ['www.vultr1.com']
    start_urls = ['http://www.vultr1.com/']

    # MySQL connection settings.  NOTE(review): credentials are hard-coded;
    # consider moving them to Scrapy settings / environment variables.
    MYSQL_HOST = "127.0.0.1"
    MYSQL_PORT = 3306
    MYSQL_USER = "root"
    MYSQL_PWD = "1741750591"
    MYSQL_DB = "www_vultr1_com"

    def __init__(self, *args, **kwargs):
        # Forward to RedisSpider so scrapy / scrapy-redis initialization
        # (name/start_urls handling, redis_key wiring) still happens; the
        # original omitted this, which breaks spider construction paths
        # that pass arguments.
        super().__init__(*args, **kwargs)
        conn_params = {
            "host": self.MYSQL_HOST,
            "port": self.MYSQL_PORT,
            "user": self.MYSQL_USER,
            "passwd": self.MYSQL_PWD,
            "db": self.MYSQL_DB,
            "use_unicode": True,
            "charset": "utf8",
        }
        self.db = MySQLdb.connect(**conn_params)
        #self.playerconfig()

    def playerconfig(self):
        """Fetch playerconfig.js, parse the MacPlayer ``player_list`` JSON
        and insert one row per player source into the ``source`` table.

        Commits once after all inserts; the connection is left open for
        reuse by pipelines.
        """
        res = requests.get(self.domain + "/static/js/playerconfig.js")
        data = re.search(r"player_list=(.*?),MacPlayerConfig.downer_list", res.text).group(1)
        self.player_list = json.loads(data)
        cursor = self.db.cursor()
        # Parameterized query: the values come from a remotely fetched JS
        # file, so never splice them into the SQL string (injection and
        # quote-breakage risk in the original concatenation).
        sql = "INSERT INTO source SET `name`=%s, `desc`=%s, `parse`=%s, `show`=%s"
        for k, v in self.player_list.items():
            cursor.execute(sql, (k, v["des"], v["parse"], v["show"]))
            print(sql)
        self.db.commit()
        # self.db.close()
        # print(self.player_list)

    def parse(self, response):
        """Entry point: walk the header menu (skipping first/last entries)
        and schedule one category-listing request per menu link, tagging
        each with its numeric type id extracted from the URL."""
        href_list = response.xpath("//ul[@class='stui-header__menu type-slide']/li/a/@href")[1:-1]
        for href in href_list:
            _type = re.search(r"v/(\d*).html", href.get()).group(1)
            yield scrapy.Request(self.domain + href.get(), callback=self.parse_item, meta={"type": _type})

    # Category listing pages
    def parse_item(self, response):
        """Schedule a detail request per thumbnail link, then follow the
        "next page" link until the active page equals the last page."""
        a_list = response.xpath("//a[@class='stui-vodlist__thumb lazyload']/@href")
        for a in a_list:
            yield scrapy.Request(
                    self.domain + a.extract(),
                    callback=self.parse_detail,
                    meta={"type": response.meta["type"]}
                )

        next_url = response.xpath("//ul[@class='stui-page text-center clearfix']/li/a[text()='下1页']/@href").extract_first(default="")
        last_url = response.xpath("//ul[@class='stui-page text-center clearfix']/li/a[text()='尾页']/@href").extract_first(default="")
        active_url = response.xpath("//ul[@class='stui-page text-center clearfix']/li[@class='hidden-xs active']/a/@href").extract_first(default="")
        # extract_first(default="") never returns None, so the original
        # "next_url != None" test was always true; test truthiness instead
        # and stop when the active page is the last page.
        if next_url and active_url != last_url:
            print("next_url:"+self.domain + next_url + "---已抓取")
            yield scrapy.Request(self.domain + next_url, callback=self.parse_item, meta={"type": response.meta["type"]})
        else:
            print("数据已爬完.......")

    # Video detail pages
    def parse_detail(self, response):
        """Yield one ``video`` item for the page, then one request per
        episode link in the playlist (episode number parsed from the URL)."""
        item = {}
        item["table"] = "video"
        item["id"] = re.search(r"video/(\d*).html", response.request.url).group(1)
        item['thumb'] = response.xpath("//img[@class='lazyload']/@src").get(default="")
        item['title'] = response.xpath("//h1[@class='title']/text()").extract_first(default="")
        item["type"] = response.meta["type"]
        item['area'] = response.xpath("//p[@class='data ']/text()[4]").extract_first(default="").replace('\t', '')
        item['year'] = response.xpath("//p[@class='data ']/text()[6]").extract_first(default="").replace('\t', '').replace('\r\n', '')
        item['director'] = response.xpath("//p[@class='data'][2]/a/text()").get(default="")
        item['introduction'] = ""
        sketch = response.xpath("//span[@class='detail-sketch']/text()")
        content = response.xpath("//span[@class='detail-content']/text()")
        if sketch:
            item['introduction'] += sketch.get(default="")
        if content:
            # BUGFIX: the original appended sketch again here, so the
            # detail-content text was never collected.
            item['introduction'] += content.get(default="")

        item['actors'] = ""
        for s in response.xpath("//p[@class='data'][1]/a/text()"):
            item['actors'] += s.extract() + " "
        yield item

        ul = response.xpath("//ul[@class='stui-content__playlist clearfix']/li/a/@href")
        for a in ul:
            # Episode number, e.g. "...-3.html" -> "3"
            part = re.search(r"-(\d*).html", a.get()).group(1)
            yield scrapy.Request(self.domain + a.get(), callback=self.parse_play, meta={"part": part, "video_id": item["id"]})

    # Episode play pages
    def parse_play(self, response):
        """Extract the inline ``player_data`` JSON from the play page and
        yield a ``play`` item linking episode -> source/url."""
        item = {}
        item["part"] = response.meta["part"]
        item["video_id"] = response.meta["video_id"]
        item["table"] = "play"
        # Assumes player_data JSON ends exactly at the closing script tag;
        # json.loads will raise if the page layout changes.
        player_data = re.search(r"player_data=(.*?)</script>", response.text).group(1)
        player_data = json.loads(player_data)
        item["from"] = player_data["from"]
        item["url"] = player_data["url"]
        yield item






