# -*- coding: utf-8 -*-
import scrapy
import time
import re
import urllib.request
import os
from selenium import webdriver
from scrapy.http import Request
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import meipai.decoder


class MainSpider(scrapy.Spider):
    """Spider that crawls meipai.com category listings and downloads videos.

    For every category in ``classification`` it walks the paginated listing,
    re-renders each page in PhantomJS (the video grid is filled in by
    JavaScript), extracts the obfuscated ``data-video`` attributes, decodes
    them with ``meipai.decoder.Decode`` and downloads the resulting MP4 files
    into a per-category directory tree under ``美拍视频/``.
    """

    name = 'main'
    allowed_domains = ['meipai.com']
    start_urls = ['http://meipai.com/']
    # Desktop Chrome UA; sent with every Scrapy request so the site serves
    # the normal desktop layout.
    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) Apple\
    WebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36"}

    # Human-readable category label -> URL path fragment on meipai.com.
    classification = {
        '热门': 'hot',
        '搞笑': 'square/13',
        '明星': 'square/16',
        '高颜值': 'square/474',
        '精选': 'square/488',
        '舞蹈': 'square/63',
        '音乐': 'square/62',
        '美食': 'square/59',
        '美妆': 'square/27',
        '吃秀': 'square/423',
        '宝宝': 'square/18',
        '宠物': 'square/6',
        '手工': 'square/450',
        '游戏': 'square/480',
        '运动': 'square/487',
        '穿秀': 'square/460'
    }
    url_key = classification.keys()

    def start_requests(self):
        """Yield one Scrapy request per listing page of every category.

        Creates the local output directory tree as a side effect.  Pagination
        is probed with a blocking urllib fetch: as long as the fetched page
        contains a "下一页" (next page) link, the next page number is
        requested.
        """
        base = 'http://www.meipai.com/medias/'
        if not os.path.exists('美拍视频'):
            os.mkdir('美拍视频')
        for label in self.url_key:
            print('现在开始获取"' + label + '"标签下的url')
            if not os.path.exists('美拍视频/' + label):
                os.mkdir('美拍视频/' + label)
            category_url = base + self.classification[label]

            page = 1
            while True:
                print('page:' + str(page))
                page_url = category_url + '/' + '?p=' + str(page)
                yield Request(page_url,
                              meta={"cookiejar": 1},
                              headers=self.headers,
                              callback=self.per_class
                              )
                # NOTE(review): this blocking fetch duplicates the request
                # Scrapy just scheduled; it exists only to detect whether a
                # "next page" link is present on the listing.
                try:
                    data = urllib.request.urlopen(page_url).read().decode('utf-8')
                except Exception as exc:
                    # Network failure while probing pagination: stop this
                    # category instead of killing the whole generator.
                    print('获取分页信息失败:' + str(exc))
                    break

                if re.findall('下一页', data):
                    page += 1
                else:
                    break

    def per_class(self, response):
        """Render one listing page with PhantomJS and download its videos.

        The video grid is populated by JavaScript, so the page is re-opened
        in a headless PhantomJS browser and scrolled to force lazily loaded
        content to appear before the HTML is scraped with regexes.
        """
        page_url = response.url

        # Spoof a desktop Firefox UA inside PhantomJS.
        dcap = dict(DesiredCapabilities.PHANTOMJS)
        dcap["phantomjs.page.settings.userAgent"] = "Mozilla/5.0(WindowsNT6.1;rv:2.0.1)Gecko/20100101Firefox/4.0.1"
        browser = webdriver.PhantomJS(desired_capabilities=dcap)
        try:
            browser.get(page_url)

            # Scroll down in screen-sized (1280 px) steps so lazily loaded
            # thumbnails/videos are rendered into the DOM.
            for i in range(10):
                js = 'window.scrollTo(' + str(i * 1280) + ',' + str((i + 1) * 1280) + ')'
                browser.execute_script(js)

            data = browser.page_source
        finally:
            # Always release the PhantomJS process (previously leaked one
            # headless browser per page).
            browser.quit()

        patid = '<a class="home-title" href="/medias/.*?">(.*?)</a>'
        patname = 'class="content-l-p pa" title="(.*?)">'
        patvideo = 'data-video="(.*?)"'

        ids = re.findall(patid, data)
        if not ids:
            # Page did not render as expected; nothing to download here.
            print('未能解析页面:' + page_url)
            return
        ID = ids[0]
        if not os.path.exists('美拍视频/' + ID):
            os.mkdir('美拍视频/' + ID)

        names = re.findall(patname, data)
        videos = re.findall(patvideo, data)

        for title, encoded in zip(names, videos):
            # The data-video attribute is obfuscated; the decoding algorithm
            # lives in meipai.decoder.
            video_url = meipai.decoder.Decode(encoded)
            print(title + '正在爬取')
            try:
                urllib.request.urlretrieve(video_url, '美拍视频/' + ID + '/' + title + '.mp4')
                print('爬取成功')
            except Exception:
                # Download may fail, or the title may contain characters that
                # are illegal in file names; skip this video and continue.
                print('爬取失败')
            time.sleep(5)