# -*- coding:utf8 -*-
import importlib
import re, sys, json, datetime, random, time
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider
try:
    from scrapy.spiders import Spider
except ImportError:  # pre-1.0 Scrapy used a different module path / class name
    from scrapy.spider import BaseSpider as Spider

from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc

from gaokaopai.items import *
from gaokaopai.dao import *
from gaokaopai.util import *
import jieba

# NOTE: the Python 2 `importlib.reload(sys)` + `sys.setdefaultencoding("utf-8")`
# hack was removed — it has no effect on Python 3, where str is Unicode.

class UniversitySpider(Spider):
    """Crawl universities from gaokaopai.com.

    Walks the paginated university list, keeping stored university names in
    sync with the site.  The follow-up requests (detail page, admission
    guides, major list) are currently disabled — see the NOTE in
    ``parse_list`` — but their callbacks are kept below so they can be
    re-enabled.
    """
    name        = 'gaokaopai_university'
    allow       = ['gaokaopai.com']

    def start_requests(self):
        """Entry point: first page of the full university listing."""
        yield Request("http://www.gaokaopai.com/daxue-0-0-0-0-0-0-0.html", callback=self.parse_list, dont_filter=True)

    def parse_list(self, response):
        """Parse one page of the university list and follow pagination.

        For every listed university, look up the stored record by its site
        id (fid) and update the stored name when the site's name differs.
        """
        base_url = get_base_url(response)

        for item_dom in response.xpath(u"//div[contains(@class, 'schoolList')]/ul/li"):
            # `rank` is only consumed by the disabled detail request below.
            rank = getNum(''.join(item_dom.xpath(u".//div[contains(@class, 'like')]/text()").extract()).strip())
            url = ''.join(item_dom.xpath(u".//h3/a/@href").extract()).strip()
            name = ''.join(item_dom.xpath(u".//h3/a/text()").extract()).strip()

            # University id, taken from the second path segment of the URL.
            fid = getNum(getCode(url, 2))

            university = getUniversityByFid(fid)
            if university is not None and name != university['name']:
                # Name changed on the site — log loudly and sync the DB.
                print("=======" * 10)
                print(university['name'], "======>", name)
                print("========" * 10)
                updateUniversityName(university['id'], name)

            # NOTE: for universities not yet in the DB, the original spider
            # fanned out additional requests; they are currently disabled.
            # URL pattern: http://www.gaokaopai.com/daxue-<section>-<fid>.html
            # with sections/callbacks:
            #   detail page             -> parse_university (meta: rank, fid)
            #   zhinan (招生简章)       -> parse_zhinan
            #   zizhuzhaosheng (自主招生)-> parse_zhaosheng
            #   baosongsheng (保送生)   -> parse_baosongsheng
            #   techangsheng (特长生)   -> parse_techangsheng
            #   zhuanye (专业)          -> parse_university_profession

        # Pagination: follow the "下一页" (next page) link if present.
        # NOTE(review): urljoin_rfc was removed in modern Scrapy; if the
        # import ever breaks, response.urljoin() is the replacement.
        next_page = ''.join(response.xpath(u"//div[contains(@class, 'pager')]/a[contains(text(), '下一页')]/@href").extract())
        if next_page != '':
            yield Request(urljoin_rfc(base_url, next_page), callback=self.parse_list, dont_filter=True)

    def parse_university_profession(self, response):
        """Parse a university's major (专业) listing page.

        Yields UProfession items for the 专科 (type 1) and 本科 (type 2)
        sections; majors that have a detail link are followed to
        ``parse_up`` instead of being emitted directly.
        """
        fid = response.meta['fid']
        # The two sections have identical markup; one helper handles both.
        for result in self._parse_profession_section(response, fid, u'专科专业', 1):
            yield result
        for result in self._parse_profession_section(response, fid, u'本科专业', 2):
            yield result

    def _parse_profession_section(self, response, fid, header, type_id):
        """Yield items/requests for one section (专科 or 本科) of the major list.

        Iterates the sibling divs after the section's title bar and stops at
        the next title bar (class ``catTitle``).
        """
        xpath = (u"//div[contains(@class, 'catTitle')]/h2[contains(text(), '%s')]"
                 u"/parent::div/following-sibling::div" % header)
        for dom in response.xpath(xpath):
            # A new catTitle div marks the start of the next section.
            if 'catTitle' in ''.join(dom.xpath(u"./@class").extract()):
                break

            kind = ''.join(dom.xpath("./h3/text()").extract()).strip()
            # Drop the leading marker character and the trailing "（…）" count.
            num = kind.find(u'（')
            if num == -1:
                # Robustness: no count suffix present — keep the whole name
                # (the original slice kind[1:-1] silently chopped the last char).
                kind = kind[1:].strip()
            else:
                kind = kind[1:num].strip()

            for li_dom in dom.xpath("./ul/li"):
                href = ''.join(li_dom.xpath(u"./a/@href").extract())
                if href != '':
                    # Major with its own detail page.
                    name = ''.join(li_dom.xpath(u"./a/text()").extract()).strip()
                    yield Request(href, callback=self.parse_up,
                                  meta={'fid': fid, 'kind': kind, 'type': type_id, 'name': name})
                else:
                    up = UProfession()
                    up['table'] = 't_university_major'
                    up['fid']   = fid
                    # NOTE(review): the original stores '' here, not `kind` —
                    # preserved as-is; confirm whether that was intentional.
                    up['kind']  = ''
                    up['name']  = ''.join(li_dom.xpath(u"./text()").extract()).strip()
                    up['type']  = type_id

                    yield up

    def parse_up(self, response):
        """Parse a major's detail page into a full UProfession item."""
        meta = response.meta

        # Full HTML of the block immediately after the title bar.
        content = ''.join(response.xpath(u"//div[contains(@class, 'catTitle')]/following-sibling::div[1]/node()").extract())

        # Major code, extracted from the '专业解读' link when present.
        code = ''
        url = ''.join(response.xpath(u"//a[contains(text(), '专业解读')]/@href").extract()).strip()
        if url != '':
            code = getCode(url, 2).replace('.html', '')

        up = UProfession()
        up['table']   = 't_university_major'
        up['fid']     = meta['fid']
        up['kind']    = meta['kind']
        up['name']    = meta['name']
        up['content'] = content
        up['code']    = code
        up['type']    = meta['type']

        yield up

    def parse_university(self, response):
        """Parse a university detail page.

        Yields one University item, then UPoint items for the featured
        (特色专业, type 1) and key (重点专业, type 2) majors when those
        sections exist on the page.
        """
        meta = response.meta

        university                  = University()
        university['table']         = 't_university'
        university['fid']           = meta['fid']
        university['url']           = response.url
        university['name']          = ''.join(response.xpath(u"//div[contains(@class, 'schoolName')]/strong/text()").extract()).strip()
        university['ename']         = ''.join(response.xpath(u"//div[contains(@class, 'enName')]/text()").extract()).strip()
        university['rank']          = meta['rank']
        university['category']      = ''.join(response.xpath(u"//li/span[contains(text(), '学校类型')]/following-sibling::div/text()").extract()).strip()
        university['feature']       = ','.join(response.xpath(u"//div[contains(@class, 'schoolName')]/div[contains(@class, 'st')]/img/@title").extract()).strip()
        # City string is word-segmented with jieba so downstream search can match parts.
        university['cityinfo']      = ' '.join(jieba.cut(''.join(response.xpath(u"//li/label[contains(text(), '所处城市')]/parent::li/text()").extract()).strip()))
        university['address']       = ''.join(response.xpath(u"//li/label[contains(text(), '学校地址')]/parent::li/text()").extract()).strip()
        university['tel']           = ''.join(response.xpath(u"//li/label[contains(text(), '招生电话')]/parent::li/text()").extract()).strip()
        university['email']         = ''.join(response.xpath(u"//li/label[contains(text(), '电子邮箱')]/parent::li/text()").extract()).strip()

        university['gov_url']       = ''.join(response.xpath(u"//a[contains(text(), '进入官网')]/@href").extract()).strip()
        university['enroll_url']    = ''.join(response.xpath(u"//a[contains(text(), '招生网站')]/@href").extract()).strip()

        university['logo']          = ''.join(response.xpath(u"//div[contains(@class, 'schoolLogo')]/img/@src").extract()).strip()
        university['intro']         = ''.join(response.xpath(u"//div[contains(@class, 'intro')]/node()").extract()).strip()
        university['found_time']    = getNum(''.join(response.xpath(u"//li/span[contains(text(), '创建时间')]/following-sibling::div/text()").extract()).strip())
        university['subjection']    = ''.join(response.xpath(u"//li/span[contains(text(), '隶属于')]/following-sibling::div/text()").extract())

        university['student']       = ''.join(response.xpath(u"//li/span[contains(text(), '学生人数')]/following-sibling::div/text()").extract())
        university['doctor']        = ''.join(response.xpath(u"//li/span[contains(text(), '博士点个数')]/following-sibling::div/text()").extract())
        university['master']        = ''.join(response.xpath(u"//li/span[contains(text(), '硕士点个数')]/following-sibling::div/text()").extract())
        university['academician']   = ''.join(response.xpath(u"//li/span[contains(text(), '院士人数')]/following-sibling::div/text()").extract())
        university['key_course']    = ''.join(response.xpath(u"//li/span[contains(text(), '重点学科')]/following-sibling::div/text()").extract())
        university['job']           = ''.join(response.xpath(u"//h2[contains(text(), '就业情况')]/parent::div/following-sibling::div[1]/node()").extract())
        # BUG FIX: response.body is bytes on Python 3 and cannot be matched
        # against a str pattern (TypeError) — use the decoded response.text.
        university['come_data']     = ''.join(re.findall(r'学生占比\',(.*?)}]', response.text, re.S)).strip()
        university['male_female']   = ''.join(response.xpath(u"//div[@class='tip'][contains(text(),'男生')]/text()").extract()).strip('\n')
        university['fee']           = ''.join(response.xpath(u"//h2[contains(text(), '学费信息')]/parent::div/following-sibling::div[1]/node()").extract())

        yield university

        # Featured majors (特色专业).
        if len(response.xpath(u"//li/h2[contains(text(), '特色专业')]").extract()) > 0:
            for dom in response.xpath(u"//div[contains(@class, 'modContent')]/div[1]//h3"):
                name    = ''.join(dom.xpath("./text()").extract()).strip()
                content = ''.join(dom.xpath("./following-sibling::p[1]/text()").extract())

                up            = UPoint()
                up['table']   = 't_university_point'
                up['fid']     = meta['fid']
                up['type']    = 1
                up['name']    = name
                up['content'] = content

                yield up

        # Key majors (重点专业).
        if len(response.xpath(u"//li/h2[contains(text(), '重点专业')]").extract()) > 0:
            # The key-major panel is the 2nd modContent div, unless the
            # featured-major panel is absent, in which case it is the 1st.
            num = 2
            if len(response.xpath(u"//li/h2[contains(text(), '特色专业')]").extract()) == 0:
                num = 1

            for dom in response.xpath(u"//div[contains(@class, 'modContent')]/div[" + str(num) + "]//h3"):
                tips = ''.join(dom.xpath("./text()").extract()).strip()

                for li_dom in dom.xpath("./following-sibling::ul[1]/li"):
                    li_url = ''.join(li_dom.xpath(u"./a/@href").extract())
                    name   = ''.join(li_dom.xpath(u"./a/text()").extract())
                    code   = getCode(li_url, 2).replace('.html', '')

                    up          = UPoint()
                    up['table'] = 't_university_point'
                    up['fid']   = meta['fid']
                    up['type']  = 2
                    up['tips']  = tips
                    up['name']  = name
                    up['code']  = code

                    yield up

    def _make_guide(self, response, type_id, title_suffix):
        """Build a Guide item of the given type from a guide page.

        All four guide pages share the same content container, so the four
        public callbacks below only differ in type id and title suffix.
        """
        guide            = Guide()
        guide['table']   = 't_guide'
        guide['fid']     = response.meta['fid']
        guide['year']    = 2017  # NOTE: crawl year is hard-coded site-wide
        guide['type']    = type_id
        guide['title']   = "2017年" + response.meta['name'] + title_suffix
        guide['content'] = ''.join(response.xpath("//div[@class='catTitleText']/div[@class='txt']/node()").extract())
        return guide

    def parse_zhinan(self, response):
        """Admission guide (招生简章), guide type 1."""
        yield self._make_guide(response, 1, "招生简章")

    def parse_zhaosheng(self, response):
        """Independent admission guide (自主招生简章), guide type 2."""
        yield self._make_guide(response, 2, "自主招生简章")

    def parse_baosongsheng(self, response):
        """Recommended-student guide (保送生简章), guide type 3."""
        yield self._make_guide(response, 3, "保送生简章")

    def parse_techangsheng(self, response):
        """Special-talent guide (特长生简章), guide type 4."""
        yield self._make_guide(response, 4, "特长生简章")