# -*- coding: utf-8 -*-
import os

import scrapy
# from pip._vendor import requests
from scrapy import Selector

from pachon.DownloadItem import DownloadItem
from pachon.ImageItem import ImageItem


def parse_movie(response):
    """Parse a movie-category landing page and schedule one request per
    pagination option, each handled by ``parse_movie_page``.

    Expects ``response.meta`` to carry ``dirname``, ``file_root`` and
    ``root_url`` (set by ``MySpider.parse``).
    """
    try:
        current_dirname = response.meta['dirname']
        file_root = response.meta['file_root']
        root_url = response.meta['root_url']

        # BUG FIX: the original iterated every matching <ul> and then ran an
        # *absolute* XPath ("//div[...]") inside the loop.  An absolute path
        # ignores the context node and re-selects the document-wide pagination
        # options once per <ul>, yielding duplicate requests.  Select the
        # option list exactly once instead.
        page_options = response.xpath(
            "//div[@class='box movie_list']/select/option")
        for current_page in page_options:
            current_link = current_page.xpath('.//@value').extract_first()
            if not current_link:
                # <option> without a value attribute — nothing to fetch.
                continue
            yield scrapy.Request(
                root_url + current_link,
                meta={'current_dirname': current_dirname,
                      'file_root': file_root,
                      'root_url': root_url},
                callback=parse_movie_page)

    except Exception as e:
        # Best-effort crawl: report and keep the spider alive.
        print(e)


def parse_movie_page(response):
    """Parse one page of a movie listing and yield a detail-page request per
    movie link, carrying filename/date metadata for the later download step.
    """
    try:
        current_dirname = response.meta['current_dirname']
        file_root = response.meta['file_root']
        root_url = response.meta['root_url']

        uls = response.xpath('//div[@class="box movie2_list"]/ul')
        for current_ul in uls:
            for current_li in current_ul.xpath('.//li/a'):
                filename = current_li.xpath('.//h3/text()').extract_first()
                file_url = current_li.xpath('.//@href').extract_first()
                raw_date = current_li.xpath(
                    './/span[@class="movie_date"]/span[@class="bg_top"]/text()'
                ).extract_first()
                # BUG FIX: extract_first() returns None when a node is
                # missing; the original then did None[0:10], raising
                # TypeError and (via the broad except) silently dropping
                # every remaining link on the page.  Skip just the
                # malformed entry instead.
                if not (filename and file_url and raw_date):
                    continue
                file_date = raw_date[0:10]  # keep only the date prefix
                yield scrapy.Request(
                    root_url + file_url,
                    meta={'current_dirname': current_dirname,
                          'file_root': file_root,
                          'filename': filename,
                          'file_date': file_date},
                    callback=parse_movie_detail)

    except Exception as e:
        # Best-effort crawl: report and keep the spider alive.
        print(e)


def parse_movie_detail(response):
    """Parse a movie detail page: ensure the target directory exists, extract
    the download URL and return a populated ``DownloadItem`` for the pipeline.

    Returns None (implicitly) if anything fails; the error is printed.
    """
    try:
        filename = response.meta['filename'] + '.mp4'
        file_date = response.meta['file_date']
        current_dirname = response.meta['current_dirname']
        file_root = response.meta['file_root']

        # os.path.join replaces manual '\\' concatenation, and
        # makedirs(exist_ok=True) fixes two defects of the original
        # exists()+mkdir pair: it creates missing parent directories
        # (os.mkdir raised if the category dir was absent) and it is not
        # racy under Scrapy's concurrent callbacks.
        currentpath = os.path.join(file_root, current_dirname, file_date)
        os.makedirs(currentpath, exist_ok=True)

        download_url = response.xpath(
            '//div[@class="wrap mt20"]/div[@align="center"][position()=2]'
            '/b/font/a/text()').extract_first()

        downloaditem = DownloadItem()
        downloaditem['file_url'] = download_url
        downloaditem['filename'] = filename
        # 'file_floder' (sic) is the field name declared by DownloadItem.
        downloaditem['file_floder'] = currentpath
        return downloaditem
    except Exception as e:
        # Best-effort crawl: report and keep the spider alive.
        print(e)


def parse_image(response):
    """Parse an image-category landing page and schedule one request per
    pagination option, each handled by ``parse_image_page``.
    """
    current_dirname = response.meta['dirname']
    file_root = response.meta['file_root']
    root_url = response.meta['root_url']

    # BUG FIX: the original looped over every gallery link and ran an
    # *absolute* XPath ('//div[@class="pagination"]/...') inside the loop.
    # An absolute path ignores the context node, so the document-wide
    # option list was re-selected once per link, yielding duplicate
    # requests.  Select the options exactly once.
    for current_page in response.xpath('//div[@class="pagination"]/select/option'):
        page_link = current_page.xpath('.//@value').extract_first()
        if not page_link:
            # <option> without a value attribute — nothing to fetch.
            continue
        yield scrapy.Request(
            root_url + page_link,
            meta={'file_root': file_root,
                  'current_dirname': current_dirname,
                  'root_url': root_url},
            callback=parse_image_page)


def parse_image_page(response):
    """Parse one page of an image listing and yield a gallery-detail request
    per entry, carrying the target directory and gallery name in meta.
    """
    current_dirname = response.meta['current_dirname']
    file_root = response.meta['file_root']
    root_url = response.meta['root_url']

    for current_link in response.xpath('//div[@class="box list channel"]/ul/li/a'):
        image_name = current_link.xpath('./text()').extract_first()
        image_url = current_link.xpath('./@href').extract_first()
        raw_date = current_link.xpath('.//span/text()').extract_first()
        # BUG FIX: extract_first() returns None for a missing node and the
        # original did None[0:10] — a TypeError with no try/except around
        # it here, killing the whole callback.  Skip the malformed entry.
        if not (image_name and image_url and raw_date):
            continue
        image_date = raw_date[0:10]  # keep only the date prefix
        # os.path.join instead of manual '\\' concatenation.
        currentpath = os.path.join(file_root, current_dirname, image_date)
        yield scrapy.Request(
            root_url + image_url,
            meta={'currentpath': currentpath, 'filename': image_name},
            callback=parse_image_detail)


def parse_image_detail(response):
    """Parse a gallery detail page: ensure the per-gallery directory exists,
    collect every image src in the content block and return an ``ImageItem``
    (its ``image_url`` field holds the full list of srcs).
    """
    image_path = os.path.join(response.meta['currentpath'],
                              response.meta['filename'])
    # exist_ok=True replaces the racy exists()+makedirs pair — two
    # concurrent responses for the same gallery would otherwise crash.
    os.makedirs(image_path, exist_ok=True)

    images_url = response.xpath(
        '//div[@class="wrap mt20"]/div[@class="box pic_text"]'
        '/div[@class="content"]/p/img/@src').extract()

    imageitem = ImageItem()
    imageitem['image_url'] = images_url
    # 'image_floder' (sic) is the field name declared by ImageItem.
    imageitem['image_floder'] = image_path
    return imageitem


# NOTE(review): dead code parked in a module-level string literal — a
# requests-based download path superseded by DownloadItem + pipelines
# (see the commented-out `pip._vendor.requests` import at the top).
# It is never executed; consider deleting it and relying on version
# control for history.
'''
def getText(url):
    try:
        header = {'User-Agent': 'Mozilla/5.0'}
        r = requests.get(url, timeout=100000000, headers=header, stream=True)
        r.raise_for_status()
        return r.content
    except Exception as e:
        print("申请视频错误")


def write_file(path, content):
    if not os.path.exists(path):
        with open(path, "wb") as file:
            file.write(content)
            file.flush()
    else:
        pass
'''


class MySpider(scrapy.Spider):
    """Entry spider for www.445mz.com.

    Walks the first four groups of the site's top navigation menu, creates a
    local directory per category, and dispatches each category page to the
    matching module-level parser (``parse_movie`` / ``parse_image``).
    """

    name = 'mySpider'
    allowed_domains = ['www.445mz.com']
    start_urls = ['https://www.445mz.com/home.html']

    def parse(self, response):
        file_root = 'F:\\电影\\爬虫'
        root_url = 'https://www.445mz.com'
        try:
            # A Scrapy response is already selectable — the original's
            # body.decode() + hand-built Selector was redundant work.
            uls = response.xpath('//nav[@class="menu"]/ul[position()<5]')
            for current_ul in uls:
                # First <li> holds the section link ("/move/", "/pic/", ...);
                # the remaining <li>s are the concrete categories.
                section_href = current_ul.xpath(
                    './/li[position()=1]/a/@href').extract_first()
                if section_href is None:
                    # Menu group without a header link — the original would
                    # have crashed on None.replace(); skip it instead.
                    continue
                current_type = section_href.replace('/', '').strip()

                for current_li in current_ul.xpath('.//li[position()>1]/a'):
                    dirname = current_li.xpath('.//text()').extract_first()
                    dir_linkurl = current_li.xpath('.//@href').extract_first()
                    if not (dirname and dir_linkurl):
                        continue  # malformed menu entry

                    # makedirs(exist_ok=True) replaces the racy
                    # exists()+mkdir pair and also creates file_root itself
                    # when it is missing (os.mkdir would have raised).
                    os.makedirs(os.path.join(file_root, dirname), exist_ok=True)

                    url = response.urljoin(dir_linkurl)
                    meta = {'dirname': dirname,
                            'file_root': file_root,
                            'root_url': root_url}
                    if current_type == 'move':
                        yield scrapy.Request(url, meta=meta,
                                             callback=parse_movie)
                    elif current_type == 'pic':
                        yield scrapy.Request(url, meta=meta,
                                             callback=parse_image)
                    elif current_type == 'text':
                        print('in text')
                    elif current_type == 'down':
                        print('in down')
                    else:
                        print('not in')
        except Exception as e:
            # Best-effort crawl: report and keep the spider alive.
            print(e)
