# -*- coding:utf8 -*-
import importlib
import re, sys, json, datetime, random, time
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider
# Scrapy >= 1.0 renamed BaseSpider -> Spider and moved it to scrapy.spiders;
# fall back to the pre-1.0 location on old installs.
try:
    from scrapy.spiders import Spider
except ImportError:  # bare except would also swallow KeyboardInterrupt/SystemExit
    from scrapy.spider import BaseSpider as Spider

from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc

from gaokaopai.items import *
from gaokaopai.dao import *
from gaokaopai.util import *

# NOTE(review): Python 2 relic — this reload used to re-enable the (deleted)
# sys.setdefaultencoding() below. On Python 3 it is a no-op; safe to remove.
importlib.reload(sys)
#sys.setdefaultencoding("utf-8")

class ProfessionSpider(Spider):
    """Crawl gaokaopai.com's major ("zhuanye") catalogue.

    Walks three levels of the catalogue — discipline category (学科门类,
    level 1), subject category (专业类别, level 2) and concrete major
    (level 3) — yielding one ``Profession`` item per node plus a follow-up
    ``Request`` for each deeper level.  A level-3 page may link to a
    separate "job prospects" (就业前景) page; when it does, the item is
    emitted only after that page has been scraped (``parse_jiuye``).
    """

    name  = 'gaokaopai_profession'
    allow = ['gaokaopai.com']  # kept for backward compat; Scrapy ignores it
    # Scrapy's OffsiteMiddleware reads `allowed_domains`, not `allow`.
    # Behavior-neutral here: every Request below sets dont_filter=True,
    # which OffsiteMiddleware honors.
    allowed_domains = ['gaokaopai.com']

    def __init__(self, *args, **kwargs):
        super(ProfessionSpider, self).__init__(*args, **kwargs)
        # Stamped on every yielded item; distinguishes this crawl in t_major.
        self.type = 1

    def _make_profession(self, url, name, level, code,
                         age='', diploma='', course='', intro='', job=''):
        """Build a ``Profession`` item for the `t_major` table.

        Fields not known at a given catalogue level default to ''.
        """
        profession            = Profession()
        profession['table']   = 't_major'
        profession['url']     = url
        profession['name']    = name
        profession['type']    = self.type
        profession['level']   = level
        profession['code']    = code
        profession['age']     = age
        profession['diploma'] = diploma
        profession['course']  = course
        profession['intro']   = intro
        profession['job']     = job
        return profession

    def start_requests(self):
        # Entry point: first page of the catalogue listing.
        yield Request("http://www.gaokaopai.com/zhuanye-0-0-1.html",
                      callback=self.parse_first, dont_filter=True)

    def parse_first(self, response):
        """Level 1: discipline categories (学科门类)."""
        base_url = get_base_url(response)

        for link in response.xpath(u"//h3[contains(text(), '学科门类')]/following-sibling::ul[1]/li/a"):
            name = ''.join(link.xpath(u"./text()").extract())
            url  = ''.join(link.xpath(u"./@href").extract())
            code = getCode(url, 1)

            yield self._make_profession(url, name, 1, code)
            # Drill down into this category's subject listing.
            yield Request(urljoin_rfc(base_url, url),
                          callback=self.parse_second, dont_filter=True)

    def parse_second(self, response):
        """Level 2: subject categories (专业类别)."""
        base_url = get_base_url(response)

        for link in response.xpath(u"//h3[contains(text(), '专业类别')]/following-sibling::ul[1]/li/a"):
            name = ''.join(link.xpath(u"./text()").extract())
            url  = ''.join(link.xpath(u"./@href").extract())
            code = getCode(url, 2)

            yield self._make_profession(url, name, 2, code)
            # Drill down into this subject's list of concrete majors.
            yield Request(urljoin_rfc(base_url, url),
                          callback=self.parse_three, dont_filter=True)

    def parse_three(self, response):
        """Level 3 listing: follow each concrete major's detail page."""
        base_url = get_base_url(response)

        for link in response.xpath(u"//div[contains(@class, 'majorDef')]/ul/li/a"):
            url = ''.join(link.xpath(u"./@href").extract())
            yield Request(urljoin_rfc(base_url, url),
                          callback=self.parse_zhuanye, dont_filter=True)

    def parse_zhuanye(self, response):
        """Level 3 detail: scrape one major's page.

        If the page links to a separate "job prospects" (就业前景) page,
        carry the scraped fields in request meta and defer the item to
        ``parse_jiuye``; otherwise emit the item now with an empty job.
        """
        name    = ''.join(response.xpath(u"//div[@class='majorTitle']/h1/text()").extract())
        code    = ''.join(response.xpath(u"//div[@class='majorBase']/h3[contains(text(), '专业代码')]/text()").extract()).strip().replace('专业代码：', '')
        age     = ''.join(response.xpath(u"//div[@class='majorBase']/h3[contains(text(), '修学年限')]/text()").extract()).strip().replace('修学年限：', '')
        diploma = ''.join(response.xpath(u"//div[@class='majorBase']/h3[contains(text(), '授予学位')]/text()").extract()).strip().replace('授予学位：', '')
        course  = ''.join(response.xpath(u"//h3[contains(text(), '开设课程')]/following-sibling::p/text()").extract()).strip()
        intro   = ''.join(response.xpath(u"//div[@class='majorCon']/node()").extract()).strip()

        # Job-prospects page link (may be absent).
        url = ''.join(response.xpath(u"//li/a[contains(text(), '就业前景')]/@href").extract())
        if url:
            yield Request(url, callback=self.parse_jiuye, dont_filter=True,
                          meta={'code': code, 'name': name, 'age': age,
                                'diploma': diploma, 'course': course, 'intro': intro})
        else:
            yield self._make_profession(response.url, name, 3, code,
                                        age=age, diploma=diploma,
                                        course=course, intro=intro)

    def parse_jiuye(self, response):
        """Job-prospects page: emit the completed level-3 item."""
        meta = response.meta
        job  = ''.join(response.xpath(u"//div[@class='mTxt']/node()").extract()).strip()

        yield self._make_profession(response.url, meta['name'], 3, meta['code'],
                                    age=meta['age'], diploma=meta['diploma'],
                                    course=meta['course'], intro=meta['intro'],
                                    job=job)