# -*- coding:utf8 -*-
'''
华禹教育独立学院解析
'''
import importlib
import re, sys, json, datetime, random, time
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider
try:
    from scrapy.spiders import Spider
except ImportError:
    # Fallback for old Scrapy (< 1.0), where the base class lived in
    # scrapy.spider as BaseSpider. Catch only ImportError — a bare
    # `except:` would also swallow SystemExit/KeyboardInterrupt.
    from scrapy.spider import BaseSpider as Spider

from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc

from gaokaopai.items import *
from gaokaopai.dao import *
from gaokaopai.util import *

# Python-2 relic removed: `reload(sys)` + `sys.setdefaultencoding("utf-8")`
# was a hack to change the default string encoding. In Python 3 strings are
# Unicode, sys.setdefaultencoding does not exist, and reloading sys has no
# useful effect — so the whole idiom is dropped.

class HuaueDuliSpider(Spider):
    """Crawl huaue.com's independent-college (独立学院) listing pages and mark
    matching rows in t_university with isduli=1.

    College names that cannot be matched via getUniversity() are appended to
    `duli.txt` for manual review.
    """
    name        = 'huaue_duli'
    # NOTE(review): Scrapy's standard attribute is `allowed_domains`; confirm
    # something else reads `allow` before renaming it.
    allow       = ['huaue.com']

    def __init__(self, *args, **kwargs):
        super(HuaueDuliSpider, self).__init__(*args, **kwargs)

        # Report file for unmatched names. Explicit utf-8 so Chinese names
        # don't depend on the platform's locale encoding.
        self.file_object = open('duli.txt', 'a+', encoding='utf-8')

    def closed(self, reason):
        # Scrapy calls this automatically when the spider finishes;
        # without it the report file handle leaked for the process lifetime.
        self.file_object.close()

    def start_requests(self):
        # Single entry page listing independent colleges, grouped by region.
        yield Request("http://www.huaue.com/dlwz.HTM", callback=self.parse_list, dont_filter=True)

    def parse_list(self, response):
        """Follow every listing link whose anchor text mentions 独立学院."""
        base_url = get_base_url(response)

        for item_dom in response.xpath(u"//table[contains(@id, 'table2')]/.//td/a[contains(text(), '独立学院')]"):
            url = ''.join(item_dom.xpath(u"./@href").extract()).strip()

            yield Request(urljoin_rfc(base_url, url), callback=self.parse_detail, dont_filter=True)

    def parse_detail(self, response):
        """Extract college names from a detail page and flag them in the DB.

        Anchors whose text contains 'http' are raw URLs, not names, and are
        excluded by the XPath predicate.
        """
        for a_dom in response.xpath(u"//table[@id='Change_zg']/.//a[not(contains(text(), 'http'))]"):
            name = ''.join(a_dom.xpath(u"./text()").extract()).strip()
            university = getUniversity(name)
            if university is not None:
                # NOTE(review): string-interpolated SQL. The id comes from our
                # own DB lookup, but prefer a parameterized call if the
                # executeSql helper supports placeholders.
                sql = "update t_university set isduli=1 where id=%s" % university['id']
                executeSql(sql)
            else:
                # Unmatched name — log it for manual review.
                self.file_object.write(name + "\n")
