# -*- coding: utf-8 -*-
import random

import scrapy
from ..items import DoubanItem
import json
import requests
from lxml import etree
from ..settings import USER_AGENTS

class DoubanspiderSpider(scrapy.Spider):
    """Crawl Douban's movie-search JSON API and enrich each hit from its detail page.

    Each API page lists up to 20 movies; for every movie a ``DoubanItem`` is
    yielded with: title, rate, url, cover, directors ('#'-joined with a
    trailing '#'), film_length, info, imdb, indent.
    """
    name = 'doubanSpider'
    allowed_domains = ['movie.douban.com']
    start_urls = ['http://movie.douban.com/j/new_search_subjects?sort=U&range=0,10&tags=&start=0']
    nextPage = 0  # zero-based page counter; the API paginates in steps of 20

    def parse(self, response):
        """Parse one JSON search page, yield one item per movie, then paginate.

        NOTE(review): detail pages are fetched synchronously with `requests`,
        which blocks Scrapy's reactor; consider yielding a scrapy.Request
        carrying the partial item in ``meta`` instead.
        """
        data_li = json.loads(response.text)
        # 'data' may be missing/None on the last page — normalize to a list
        # so both the loop and the pagination check below are safe.
        movies = data_li.get('data') or []

        for data in movies:
            item = DoubanItem()

            try:
                item['title'] = data.get('title')
                item['rate'] = data.get('rate')
                item['url'] = data.get('url')
                item['cover'] = data.get('cover')
                # Keep the legacy format: every director name followed by '#',
                # e.g. 'A#B#'. Guard against a missing/None directors list.
                item['directors'] = ''.join(
                    d + '#' for d in (data.get('directors') or []))
            except Exception as e:
                self.logger.warning('failed to map listing fields: %s', e)

            if data.get('url'):
                try:
                    # Swap the 'https' scheme for plain 'http' before fetching.
                    url = 'http' + data.get('url')[5:]
                    req = requests.get(
                        url=url,
                        headers={'User-Agent': random.choice(USER_AGENTS)})
                    html = etree.HTML(req.text)

                    # Runtime in minutes (the @content attribute) and the first
                    # external link under #info (expected to be the IMDb link).
                    item['film_length'] = str(html.xpath(
                        '//*[@id="info"]/span[@property="v:runtime"]/@content')[0])
                    item['imdb'] = html.xpath('//*[@id="info"]/a/@href')[0]

                    try:
                        item['info'] = ''.join(
                            t.replace('\n', '').strip()
                            for t in html.xpath('//*[@id="info"]/text()'))
                    except Exception:
                        item['info'] = 'error'

                    try:
                        item['indent'] = ''.join(
                            t.strip()
                            for t in html.xpath('//*[@id="link-report"]/span/text()'))
                    except Exception:
                        item['indent'] = 'error'

                except Exception as e:
                    self.logger.warning('failed to fetch detail page: %s', e)

            # Yield even when detail enrichment failed, matching the original
            # best-effort behavior.
            yield item

        # Stop once the API returns an empty page; previously the next page was
        # requested unconditionally, so the crawl never terminated.
        if movies:
            self.nextPage += 1
            next_url = ('http://movie.douban.com/j/new_search_subjects'
                        '?sort=U&range=0,10&tags=&start=' + str(20 * self.nextPage))
            yield scrapy.http.Request(next_url, callback=self.parse)
