import scrapy
from gerapy_pyppeteer import PyppeteerRequest
from scrapy.utils.trackref import NoneType
from hronet.items import HronetItem
import time
from bs4 import BeautifulSoup
import requests
import base64
import htmlmin
from hronet.util.getRandomProxyUtil import getRandomProxyUtil


class PopluartravelspiderSpider(scrapy.Spider):
    """Spider that crawls popular travel notes from mafengwo.cn.

    The site is JavaScript-heavy, so every page is rendered through
    gerapy_pyppeteer. Pagination is driven by injecting a click on the
    rendered page's pagination buttons; each travel-note detail page is
    converted into a HronetItem.
    """

    name = 'popluarTravelSpider'
    allowed_domains = ['www.mafengwo.cn']
    start_urls = ['https://www.mafengwo.cn']
    # Shared cookie jar placeholder (currently unused).
    cookies = {}

    def start_requests(self):
        """Issue the initial rendered request for the site's main page."""
        # gerapy_pyppeteer cannot use a proxy yet; pending a technical fix.
        # proxy = getRandomProxyUtil().getRandomProxy()

        # dont_filter=True: scrapy's dedup middleware would otherwise drop
        #   repeated requests to the same URL.
        # wait_for='.pg-next': wait for the pagination widget to appear
        #   before returning the page.
        # wait_until='networkidle0': consider the page loaded once all
        #   network requests have completed.
        yield PyppeteerRequest(url=self.start_urls[0], wait_until='networkidle0', callback=self.intoMainPage, sleep=30, wait_for='.pg-next', screenshot={
            'type': 'png',
            'fullPage': True
        }, dont_filter=True)

    # Main-page callback.
    def intoMainPage(self, response):
        """Handle the rendered main page: save debug artifacts, schedule
        the first page's detail requests, then schedule every other page."""
        # Dump the rendered main-page HTML for debugging.
        htmlFileName = 'hronet.html'
        with open(htmlFileName, 'wb') as file_object:
            file_object.write(response.body)
        # Save the full-page screenshot produced by gerapy_pyppeteer.
        screenPngFileName = 'srcreenPng/screenshot.png'
        with open(screenPngFileName, 'wb') as file_object:
            file_object.write(response.meta['screenshot'].getbuffer())

        # First page of the travel-note listing.
        itemList = response.xpath('/html/body/div[3]/div[2]/div/div[4]/div/div[1]/div')
        # gerapy_pyppeteer cannot use a proxy yet; pending a technical fix.
        # proxy = getRandomProxyUtil().getRandomProxy()
        for item in itemList:
            aUrl = item.xpath('.//div[1]/a[1]/@href').get()
            mainPictureUrl = item.xpath('.//div[1]/a[1]/img/@src').get()
            # networkidle0: all network requests on the page have completed.
            yield PyppeteerRequest(url=self.start_urls[0]+aUrl, callback=self.parse, wait_until='networkidle0', meta={'mainPictureUrl': mainPictureUrl}, wait_for='.view_con', sleep=10, dont_filter=True)

        # Pagination: crawl every page after the first. The total count is
        # parsed out of text shaped like '共N页' ("N pages in total").
        span = response.xpath('/html/body/div[3]/div[2]/div/div[4]/div/div[2]/span[1]/text()').get()
        sumPage = int(span[span.find('共')+1:span.find('页')])
        print('总共' + str(sumPage) + '页')
        for i in range(sumPage - 1):
            # Navigate by clicking the i-th pagination button in the
            # rendered DOM; turnPage handles the resulting page.
            script = 'document.getElementsByClassName("pi _j_pageitem")[' + str(i) + ']' + '.click()'
            # gerapy_pyppeteer cannot use a proxy yet; pending a technical fix.
            # secondaryProxy = getRandomProxyUtil().getRandomProxy()
            yield PyppeteerRequest(url=self.start_urls[0], callback=self.turnPage, wait_until='networkidle0', wait_for='.pg-next', script=script, sleep=10, dont_filter=True, screenshot={
            'type': 'png',
            'fullPage': True
        }, meta={'page': i + 2})

    def turnPage(self, response):
        """Handle one paginated listing page and schedule its detail pages."""
        # Save a screenshot of this listing page for debugging.
        screenPngFileName = 'srcreenPng/screenshot' + str(response.meta['page']) + '.png'
        with open(screenPngFileName, 'wb') as file_object:
            file_object.write(response.meta['screenshot'].getbuffer())
        itemList = response.xpath('/html/body/div[3]/div[2]/div/div[4]/div/div/div[1]/div')
        print('第' + str(response.meta['page']) + '页/共' + str(len(itemList)) + '条数据')
        for item in itemList:
            aUrl = item.xpath('.//div[1]/a[1]/@href').get()
            mainPictureUrl = item.xpath('.//div[1]/a[1]/img/@src').get()
            # networkidle0: all network requests on the page have completed.
            # timeout was previously the string '480000'; gerapy_pyppeteer
            # expects a number, so pass it as an int.
            yield PyppeteerRequest(url=self.start_urls[0]+aUrl, callback=self.parse, wait_until='networkidle0', timeout=480000, wait_for='.view_con', meta={'mainPictureUrl': mainPictureUrl}, sleep=10, dont_filter=True)

    # Convert a crawled detail page into an item.
    def parse(self, response):
        """Extract one travel-note detail page into a HronetItem."""
        # 'string(.)' collapses the node's text content. Each field may be
        # absent from the page, so extract once and guard against None
        # before stripping the Chinese field-label prefix.
        rawTravelDay = response.xpath('/html/body/div[2]/div[3]/div[1]/div[2]/div/ul/li[2]').xpath('string(.)').extract_first()
        formatTravelDay = rawTravelDay.replace('出行天数/', '').strip() if rawTravelDay is not None else rawTravelDay
        # renamed from `type` to avoid shadowing the builtin
        formatType = response.xpath('/html/body/div[2]/div[3]/div[1]/div[2]/div/ul/li[3]').xpath('string(.)').extract_first()
        title = response.xpath('/html/body/div[2]/div[1]/div[3]/div[2]/div/h1/text()').get()
        rawSendTime = response.xpath('/html/body/div[2]/div[3]/div[1]/div[2]/div/ul/li[1]').xpath('string(.)').extract_first()
        formatSendTime = rawSendTime.replace('出发时间/', '').strip() if rawSendTime is not None else rawSendTime
        html = response.xpath('/html/body/div[2]/div[3]/div[1]/div[3]/div[1]/div[2]').extract_first()
        # Inline every <img> as a data URL so the stored HTML is self-contained.
        formatHTML = parsePictureToDataUrl(html) if html is not None else None
        author = response.xpath('/html/body/div[2]/div[2]/div/div/div[2]/strong/a/@title').get()
        createTime = int(time.time())
        item = HronetItem(travelDay = formatTravelDay, type = formatType, title = title, html = formatHTML, author = author, createTime = createTime, sendTime = formatSendTime, mainPictureUrl = response.meta['mainPictureUrl'], isDeleted = 0)
        yield item

# Download each <img> referenced in the HTML and replace its original URL with an inline data URL
def parsePictureToDataUrl(html):
    """Inline every image in *html* as a base64 data URL and return the
    rewritten (minified) HTML string.

    The site lazy-loads images, so the real URL usually lives in the
    'data-rt-src' or 'data-src' attribute; plain 'src' is the fallback.
    Downloads are best-effort: an image that is missing a URL, times out,
    or returns a non-200 status keeps its original reference.
    """
    soup = BeautifulSoup(html, 'lxml')
    for img in soup.find_all('img'):
        # Prefer the lazy-load attributes over the placeholder 'src'.
        # (The old try/except chain crashed with an uncaught KeyError when
        # an <img> carried none of the three attributes.)
        src = img.get('data-rt-src') or img.get('data-src') or img.get('src')
        if not src:
            continue
        # Guess the MIME type from the URL; default to JPEG.
        dataType = 'data:image/jpeg;base64,'
        if 'gif' in src:
            dataType = 'data:image/gif;base64,'
        if 'jpeg' in src:
            dataType = 'data:image/jpeg;base64,'
        if 'png' in src:
            dataType = 'data:image/png;base64,'
        try:
            # timeout keeps one dead image host from hanging the whole crawl
            res = requests.get(src, stream=True, timeout=30)
        except requests.RequestException:
            continue  # best effort: leave the original reference in place
        if res.status_code == 200:
            base64_data = base64.b64encode(res.content)
            img['src'] = dataType + str(base64_data, encoding = "utf-8")
            # Drop the lazy-load attributes so browsers use the new src.
            del img['data-src']
            del img['data-rt-src']
    try:
        # Minify to keep the stored HTML small.
        return htmlmin.minify(soup.prettify(), remove_comments=True, remove_empty_space=True)
    except Exception:
        # htmlmin rejects some markup; fall back to naive whitespace
        # collapsing so the pipeline still receives usable HTML.
        return soup.prettify().strip().replace('\n', ' ').replace('\t', ' ').replace('\r', ' ')