# -*- coding: utf-8 -*-
import json
import re

import scrapy
from oauth2 import u
from scrapy import Request
from bs4 import BeautifulSoup

from MySpiders.items import DyItem


class LolDytt(scrapy.Spider):
    """Crawl the front page of loldytt.com and emit one DyItem per movie.

    ``parse`` scrapes four front-page sections (featured, rankings,
    latest, hot), creates a partially-filled ``DyItem`` for each movie
    link, and follows the link; ``parse_detial`` completes the item with
    the per-movie details (download links, synopsis, poster, category)
    and yields it.
    """

    name = 'loldy'
    start_urls = (
        'http://www.loldytt.com/index.html',
    )

    def __init__(self, *args, **kwargs):
        # Forward args so scrapy.Spider's own initialisation still runs
        # (the original skipped the super() call entirely).
        super(LolDytt, self).__init__(*args, **kwargs)
        self.baseUrl = 'http://www.loldytt.com'

    def parse(self, response):
        """Scrape the index page and schedule one detail request per movie."""
        soup = BeautifulSoup(response.body, "html5lib")

        # --- Featured ("kandian") section ---
        kandian = soup.find_all('div', class_="kandian")
        # The section label is the last four characters of the heading text.
        # (The original stored this in a local named ``str``, shadowing the
        # builtin; ``heading[-4:]`` is equivalent to the original slice.)
        heading = kandian[0].find_all('div', class_="rq")[0].get_text()
        label = heading[-4:]
        commend = kandian[0].find_all('div', class_="commend")
        for anchor in commend[0].find_all('a', class_='db'):
            dy_item = DyItem()
            dy_item["name"] = anchor.get('title')
            dy_item["url"] = anchor.get('href')
            dy_item["label"] = label
            # urljoin keeps absolute hrefs unchanged and resolves relative
            # ones against the page URL (raw relative hrefs would make
            # Request() raise).
            yield Request(response.urljoin(dy_item["url"]),
                          callback=self.parse_detial, meta={'item': dy_item})

        # --- Ranking ("paihang") sections ---
        # NOTE(review): r'youce*' matches "youc" plus zero-or-more "e";
        # presumably r'youce.*' was intended, but the prefix match works,
        # so the pattern is preserved byte-for-byte.
        for paihang in soup.find_all('div', attrs={"class": re.compile(r'youce*')}):
            label = paihang.find_all('div', class_="yph")[0].get_text()
            for li in paihang.find_all('li'):
                anchor = li.find_all('a')[0]
                dy_item = DyItem()
                dy_item["name"] = anchor.get_text()
                dy_item["url"] = anchor.get('href')
                dy_item["label"] = label
                yield Request(response.urljoin(dy_item["url"]),
                              callback=self.parse_detial, meta={'item': dy_item})

        # --- Latest ("zuixin") sections ---
        for fenlei in soup.find_all('div', attrs={"class": re.compile(r'zuoce*')}):
            label = fenlei.find_all('div', class_="leibie")[0].get_text()
            for li in fenlei.find_all('li'):
                dy_item = DyItem()
                dy_item["name"] = li.a.get_text()
                dy_item["url"] = li.a.get('href')
                dy_item["label"] = label
                dy_item["time"] = li.p.get_text()
                yield Request(response.urljoin(dy_item["url"]),
                              callback=self.parse_detial, meta={'item': dy_item})

        # --- Hot-ranking ("jqxz") sections ---
        for fenlei in soup.find_all('div', class_="jqxz"):
            label = fenlei.find_all('div', class_="qy")[0].b.get_text()
            for li in fenlei.find_all('li'):
                anchor = li.find_all('a')[0]
                dy_item = DyItem()
                dy_item["name"] = anchor.get_text()
                dy_item["url"] = anchor.get('href')
                dy_item["label"] = label
                dy_item["rank"] = li.find_all('span')[0].get_text()
                yield Request(response.urljoin(dy_item["url"]),
                              callback=self.parse_detial, meta={'item': dy_item})

    # The method name keeps the original typo ("detial") because it is the
    # callback wired up in ``parse`` and part of the spider's interface.
    def parse_detial(self, response):
        """Scrape a movie detail page and yield the completed DyItem."""
        dy_item = response.meta['item']
        soup = BeautifulSoup(response.body, "html5lib")

        # Download links: one list of {downurl, title} dicts per <ul>.
        down_urls = []
        for ul in soup.find_all('ul', class_="downurl"):
            down_urls.append([
                {"downurl": li.find('a').get('href'),
                 "title": li.find('a').get('title')}
                for li in ul.find_all('li')
            ])

        # Synopsis text.
        juqing = soup.find_all('div', class_="neirong")[0].get_text()
        # Poster image URL. A single .strip() already removes newlines, so
        # the original's extra .strip('\n') was redundant.
        pic = soup.find_all('div', class_="haibao")[0].img.get('src').strip()
        # Category: first line of the breadcrumb list text. (Local renamed
        # from ``type``, which shadowed the builtin; the item key "type"
        # is unchanged.)
        lanmu = soup.find_all('div', class_="lanmu")
        category = lanmu[0].ul.get_text().lstrip('\n').split('\n')[0]

        dy_item["down_url"] = down_urls
        dy_item["type"] = category
        dy_item["juqing"] = juqing
        dy_item["pic"] = pic
        yield dy_item

