# -*- coding: utf-8 -*-
import hashlib

import scrapy
import logging
import urlparse
from scrapy.http import FormRequest,Request
from scrapy.utils.python import to_bytes

from steam_data_collect.exceptions import RuleGetError
from steam_data_collect.items import NasaSteamDataCollectItem, imageitem
from steam_data_collect.models import nasa_rule_infos

# Module-level logger.  Use __name__ (the conventional dotted module path)
# rather than __file__, which would have produced a filesystem-path logger
# name and broken hierarchical logger configuration.
loger = logging.getLogger(__name__)

class NasaSteamSpider(scrapy.Spider):
    """Spider for the NASA/JPL "teach" (STEM lesson) pages.

    Flow:
      1. ``parse`` reads the total page count from the ``totalpages=true``
         endpoint and POSTs once per page to the paginated listing endpoint;
      2. ``content_list_parse`` follows every lesson link on a listing page;
      3. ``content_detail_parse`` scrapes one lesson into a
         ``NasaSteamDataCollectItem``, yielding an ``imageitem`` for every
         picture url so the images pipeline can download it.

    Extraction xpaths are not hard-coded: they are read from the first row
    of the ``nasa_rule_infos`` table (see ``__init__``).
    """

    name = 'nasa_steam'
    allowed_domains = ['jpl.nasa.gov']
    # With totalpages=true the endpoint answers with only the page count.
    start_urls = ['https://www.jpl.nasa.gov/edu/teach/fetch_pages.php?totalpages=true&']

    custom_settings = {
        'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
        'COOKIES_ENABLED': False,
        'DOWNLOAD_TIMEOUT': 60,
    }

    # Item fields whose xpath rule is read verbatim from the rule row, in
    # the same order the original hand-written extraction assigned them.
    # Rule attribute names match item field names one-to-one.
    _RULE_FIELDS = (
        'class_title', 'class_desc', 'class_img', 'challenge_title',
        'challenge_desc', 'objective', 'background_information',
        'science_skills', 'technology_skills', 'engineering_skills',
        'arts_skills', 'math_skills', 'directions', 'materials',
        'wrap_up', 'extended_content', 'extended_picture',
        'achievements_picture',
    )
    # Fields that hold image urls: each url is emitted as an imageitem and
    # the field value is replaced by the 'full/<sha1>.jpg' storage names.
    _IMAGE_FIELDS = ('class_img', 'extended_picture', 'achievements_picture')

    def __init__(self, *a, **kw):
        """Load the single xpath-rule row; abort the crawl if none exists.

        Raises:
            RuleGetError: when the ``nasa_rule_infos`` table has no rows.
        """
        super(NasaSteamSpider, self).__init__(*a, **kw)
        self.rule_obj = nasa_rule_infos.select().first()
        if self.rule_obj is None:  # identity check, not ``None == x``
            loger.error(RuleGetError)
            raise RuleGetError

    def parse(self, response):
        """Turn the total-page-count response into one request per page.

        The start url carries ``totalpages=true``, so the response body is
        expected to be the page count as plain text.
        """
        page_number = response.text
        try:
            # BUG FIX: the original did ``int(1)``, discarding the count
            # returned by the server and always crawling a single page.
            page_number = int(page_number)
        except Exception as e:
            loger.error(e)
            raise e
        post_url = "https://www.jpl.nasa.gov/edu/teach/fetch_pages.php?"
        referer_url = "https://www.jpl.nasa.gov/edu/teach/"
        for i in range(page_number):
            # Pages are 1-based; the endpoint appears to require a Referer.
            yield FormRequest(
                url=post_url,
                formdata={"page": "{}".format(i + 1)},
                headers={"Referer": referer_url},
                callback=self.content_list_parse,
            )

    def content_list_parse(self, response):
        """Follow every lesson link found on one listing page."""
        # url of the content detail page
        herf_url_xpath = ("//li[@class='slide']"
                          "/div[@class='image_and_description_container']"
                          "/div[@class='bottom_gradient']/div/h3/a/@href")
        ourl = urlparse.urlparse(response.url)
        for one_url in response.xpath(herf_url_xpath).extract():
            # hrefs are root-relative, so rebuild from scheme + host.
            url_next = ourl.scheme + "://" + ourl.netloc + one_url
            yield Request(url=url_next, callback=self.content_detail_parse)

    def _extract(self, response, rule):
        """Apply one xpath rule; an empty or None rule yields an empty list."""
        if rule is None or rule == '':
            return []
        return response.xpath(rule).extract()

    def content_detail_parse(self, response):
        """Scrape one lesson page into an item, yielding image items too."""
        loger.info(response.url)

        item_obj = NasaSteamDataCollectItem()
        item_obj['url'] = response.url
        # Every field is extracted with the xpath stored on the rule row;
        # this replaces 18 duplicated if/else stanzas in the original.
        for field in self._RULE_FIELDS:
            item_obj[field] = self._extract(response, getattr(self.rule_obj, field))

        # Was a bare print() in the original; route through the logger.
        loger.debug('class_img urls: %s', item_obj['class_img'])

        # For each picture field: emit one imageitem per url, and replace
        # the urls with the 'full/<sha1(url)>.jpg' paths the Scrapy images
        # pipeline stores the downloads under.
        for field in self._IMAGE_FIELDS:
            img_names = []
            for img_url in item_obj[field]:
                image_guid = hashlib.sha1(to_bytes(img_url)).hexdigest()
                img_names.append('full/' + image_guid + '.jpg')
                image_obj = imageitem()
                image_obj['image_urls'] = [img_url]
                yield image_obj
            item_obj[field] = img_names

        yield item_obj