# -*- coding:utf8 -*-
import importlib
import re, sys, json, datetime, random, time
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider
import urllib
import lxml.html
try:
    from scrapy.spiders import Spider
except:
    from scrapy.spider import BaseSpider as Spider

from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc

from gaokaopai.items import *
from gaokaopai.dao import *
from gaokaopai.util import *

# NOTE(review): `importlib.reload(sys)` is a Python 2 relic that paired with
# the (now-commented) `sys.setdefaultencoding("utf-8")` trick; on Python 3 it
# has no useful effect and could likely be deleted — TODO confirm nothing
# depends on the reload side effect before removing.
importlib.reload(sys)
# sys.setdefaultencoding("utf-8")

class ScholarshipSpider(Spider):
    """Crawl scholarship listings from baokaodaxue.com.

    Walks the paginated search results at ``/bkdx/search/jxj``; for each
    college block on a page, yields one ``Scholarship`` item per table row
    (title / amount / standard / people), then requests the next page.
    """

    name = 'scholarship'

    # Scrapy's offsite middleware reads `allowed_domains`; the original
    # `allow` attribute was never consulted by Scrapy. Both are kept so any
    # external code reading `allow` keeps working.
    allow           = ['baokaodaxue.com']
    allowed_domains = ['baokaodaxue.com']

    # Base URL of the paginated search results.
    LIST_URL = "http://www.baokaodaxue.com/bkdx/search/jxj"
    # Last results page to fetch (the site had 1326 pages when written) —
    # TODO confirm this cap is still current.
    MAX_PAGE = 1326

    def start_requests(self):
        """Kick off the crawl at page 1 of the scholarship search results."""
        yield Request(self.LIST_URL,
                      callback=self.parse_list,
                      meta={'page': 1},
                      dont_filter=True)

    def parse_list(self, response):
        """Parse one results page.

        Each college appears as a ``result-college-name`` div followed by a
        sibling table whose rows hold (title, amount, standard, people).
        Yields Scholarship items, then a Request for the next page while
        below ``MAX_PAGE``.
        """
        for dom_name in response.xpath(u"//div[contains(@class, 'result-college-name')]"):
            name = ''.join(dom_name.xpath(u"./text()").extract()).strip()

            # The scholarship table is the first sibling table after the name div.
            for dom_tr in dom_name.xpath(u"./following-sibling::table[1]/.//tr"):
                title = ''.join(dom_tr.xpath("./td[1]/text()").extract()).strip()
                # Skip header/empty rows that carry no title text.
                if not title:
                    continue

                row             = Scholarship()
                row['table']    = 't_scholarship'
                row['name']     = name
                row['title']    = title
                row['amount']   = ''.join(dom_tr.xpath("./td[2]/text()").extract()).strip()
                row['standard'] = ''.join(dom_tr.xpath("./td[3]/text()").extract()).strip()
                row['people']   = ''.join(dom_tr.xpath("./td[4]/text()").extract()).strip()

                yield row

        # Pagination: keep walking pages until the known last page.
        page = int(response.meta['page'])
        if page < self.MAX_PAGE:
            yield Request("%s?page=%s" % (self.LIST_URL, page + 1),
                          callback=self.parse_list,
                          meta={'page': page + 1},
                          dont_filter=True)

