# -*- coding:utf8 -*-
import importlib
import re, sys, json, datetime, random, time
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider
from lxml import etree
try:
    from scrapy.spiders import Spider
except:
    from scrapy.spider import BaseSpider as Spider

from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http import HtmlResponse

from gaokaopai.items import *
from gaokaopai.dao import *
from gaokaopai.util import *
from bs4 import BeautifulSoup
import requests
import lxml.html

# NOTE(review): Python-2 relic. `reload(sys)` existed only to re-expose
# `sys.setdefaultencoding`, which was removed in Python 3 — this call is
# effectively a no-op here and can be deleted together with the line below.
importlib.reload(sys)
#sys.setdefaultencoding("utf-8")

class MajorRankSpider(Spider):
    """Crawl gaokaopai.com major pages and persist per-university major rankings.

    Flow: major list page -> one request per major -> for each major, fetch its
    "专业排名" (major ranking) page with `requests` and write each
    (university, major, rank) row through the project DAO helpers.
    """
    name        = 'gaokaopai_major'
    allow       = ['gaokaopai.com']

    def __init__(self, *args, **kwargs):
        # Record-type marker; 2 == "major ranking" (also hard-coded in the
        # insertMajorRank call below).
        self.type = 2
        super(MajorRankSpider, self).__init__(*args, **kwargs)

    def start_requests(self):
        """Seed the crawl with the major list page."""
        yield Request("http://www.gaokaopai.com/zhuanye.html", callback=self.parse_list, dont_filter=True)

    def parse_list(self, response):
        """Follow every major link found on the list page."""
        for first_dom in response.xpath(u"//ul[contains(@class, 'list')]/li/a"):
            url = ''.join(first_dom.xpath(u"./@href").extract())
            # Guard: an <a> without an href would otherwise yield Request('')
            # and raise at request time.
            if url:
                yield Request(url, callback=self.parse_major, dont_filter=True)

    def parse_major(self, response):
        """Parse one major page: find its ranking page and persist the rows.

        Skips majors that are unknown to the DB or already marked type == 1.
        """
        code = ''.join(response.xpath(u"//div[@class='majorBase']/h3[contains(text(), '专业代码')]/text()").extract()).strip().replace('专业代码：', '')

        major = getMajor(code)

        # Guard clauses instead of the original nested if/else.
        if major is None or major['type'] == 1:
            return

        # Link to the "专业排名" (major ranking) tab.
        rank_url = ''.join(response.xpath(u"//div[@class='majorTab']/.//a[contains(text(), '专业排名')]/@href").extract())
        if not rank_url:
            return

        # Timeout so a stalled server can't hang the whole crawl.
        r = requests.get(rank_url, timeout=30)

        # The page packs several entries into one <p>; inserting an opening
        # <p> after every closing tag splits them so lxml sees one entry
        # per <p> element.
        html = r.text.replace('</p>', '</p><p>')
        root = lxml.html.fromstring(html)

        # Every <p> except the header row (the one containing '排名').
        for p_dom in root.xpath(u"//div[@class='majorCon']/p[not(contains(text(), '排名'))]"):
            texts = [t.strip() for t in p_dom.xpath(u"./text()")]
            # Guard: the </p><p> splicing above manufactures empty <p> nodes;
            # the original code raised IndexError on them.
            if not texts:
                continue

            rank_num = texts[0].replace('.', '')  # strip the "1." style prefix
            rank_name = texts[1] if len(texts) == 2 else ''
            # Guard: int(rank_num) below would raise ValueError on stray text.
            if not rank_num.isdigit():
                continue

            name = ''.join(p_dom.xpath(u"./a/span/text()")).strip()
            if not name:
                continue

            university = getUniversity(name)
            if university is None:
                continue

            university_major = getUniversityMajor(major['name'], university['fid'])
            flag = 0
            if university_major is not None:
                flag = 1
                updateUniversityMajor(university_major['id'], int(rank_num), rank_name)

            # Insert the rank row only if one doesn't exist yet.
            major_rank = getMajorRank(major['name'], university['fid'])
            if major_rank is None:
                insertMajorRank(university['fid'], major['name'], 2, rank_num, rank_name, flag)