# -*- coding:utf8 -*-
import importlib
import re, sys, json, datetime, random, time
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider
import urllib
import lxml.html
try:
    from scrapy.spiders import Spider
except:
    from scrapy.spider import BaseSpider as Spider

from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc

from gaokaopai.items import *
from gaokaopai.dao import *
from gaokaopai.util import *

import jieba

# Python 2-era encoding workaround: reload(sys) used to re-expose
# sys.setdefaultencoding (see the disabled line below). Under Python 3 the
# reload no longer serves that purpose — NOTE(review): likely removable.
importlib.reload(sys)
#sys.setdefaultencoding("utf-8")

class HistorySpider(Spider):
    """Crawl historical admission score-line data from gaokaopai.com.

    Walks the paginated university list, and for every university issues one
    score-line request per (province, subject track) pair. Each matching
    table row is yielded as a ``HistoryData`` item; ``parse_profession_data``
    additionally supports per-major tables (its scheduling is currently
    disabled in ``parse_list``).
    """

    name        = 'gaokaopai_history'
    allow       = ['gaokaopai.com']
    # Scrapy's OffsiteMiddleware reads `allowed_domains`, not `allow`.
    # Declared so offsite filtering works if a request is ever issued without
    # dont_filter=True (all current requests set it, which bypasses the
    # middleware, so this is behavior-neutral today). `allow` kept as-is for
    # backward compatibility with any external reader.
    allowed_domains = ['gaokaopai.com']

    def __init__(self, *args, **kwargs):
        super(HistorySpider, self).__init__(*args, **kwargs)

    def start_requests(self):
        """Seed the crawl with the first university-list page."""
        yield Request("http://www.gaokaopai.com/daxue.html", callback=self.parse_list, dont_filter=True)

    def parse_list(self, response):
        """Parse one university-list page.

        For each listed university, extract its numeric id (``fid``) from the
        detail-page URL and schedule two score-line requests per province:
        science track (st=2, km='2||理科') and liberal-arts track
        (st=1, km='1||文科'). Follows the "next page" pagination link last.
        """
        base_url = get_base_url(response)

        # The province list does not depend on the university being
        # processed; fetch it once instead of once per list entry.
        list_province = getListProvince()

        for item_dom in response.xpath(u"//div[contains(@class, 'schoolList')]/ul/li"):
            url = ''.join(item_dom.xpath(u".//h3/a/@href").extract()).strip()

            # University id, parsed out of the detail-page URL.
            fid = getNum(getCode(url, 2))

            for province in list_province:
                url = "http://www.gaokaopai.com/daxue-luquxian-%s.html"%fid

                # type '1' = science track
                yield Request(url+"?cname=%s&km=%s&st=%s"%(str(province['gid'])+'||'+str(province['name']), '2||理科', 2), meta={'provinceid':province['id'], 'type':'1', 'fid':fid}, callback=self.parse_history_data, dont_filter=True)

                # type '2' = liberal-arts track
                yield Request(url+"?cname=%s&km=%s&st=%s"%(str(province['gid'])+'||'+str(province['name']), '1||文科', 1), meta={'provinceid':province['id'], 'type':'2', 'fid':fid}, callback=self.parse_history_data, dont_filter=True)

                # Per-major (profession) requests are currently disabled.
                # To re-enable, issue the same URLs with an extra
                # `year` query arg and meta key, with
                # callback=self.parse_profession_data, for years 2013-2016.

        # Pagination: follow the "下一页" (next page) link when present.
        next_page = ''.join(response.xpath(u"//div[contains(@class, 'pager')]/a[contains(text(), '下一页')]/@href").extract())
        if next_page != '':
            yield Request(urljoin_rfc(base_url, next_page), callback=self.parse_list, dont_filter=True)

    def parse_history_data(self, response):
        """Parse one university/province/track score-line table.

        Yields a ``HistoryData`` item per data row (year, average score,
        lowest score, student count, province cutoff line, batch). Rows whose
        first cell is empty or contains the site's "没有查到" (nothing found)
        marker are reported as "no data".
        """
        meta = response.meta

        for tr_dom in response.xpath(u"//div[contains(@class, 'markLineCon')][1]/table/.//tr"):
            if ''.join(tr_dom.xpath(u"./td[1][not(contains(text(), '没有查到'))]/text()").extract()).strip() != '':
                year    = ''.join(tr_dom.xpath(u"./td[1]/text()").extract()).strip()
                average = ''.join(tr_dom.xpath(u"./td[2]/text()").extract()).strip()
                lower   = ''.join(tr_dom.xpath(u"./td[3]/text()").extract()).strip()
                student = ''.join(tr_dom.xpath(u"./td[4]/text()").extract()).strip()
                line    = ''.join(tr_dom.xpath(u"./td[5]/text()").extract()).strip()
                batch   = ''.join(tr_dom.xpath(u"./td[6]/text()").extract()).strip()

                hd              = HistoryData()
                hd['table']     = 't_university_history_data'
                hd['fid']       = meta['fid']
                hd['provinceid']= meta['provinceid']
                hd['type']      = meta['type']
                hd['year']      = getNum(year)
                hd['batch']     = batch
                hd['average']   = average
                hd['lower']     = lower
                hd['student']   = student
                hd['line']      = line

                yield hd
            else:
                print("no data", response.url)

    def parse_profession_data(self, response):
        """Parse one university/province/track/year per-major score table.

        The site ships this table wrapped in an HTML comment, so the comment
        markers are stripped and the fragment re-parsed with lxml before row
        extraction. Yields a ``ProfessionData`` item per major:
        name (td1), average score (td3), highest score (td4), batch (td5).
        """
        meta = response.meta

        html = ''.join(response.xpath(u"//div[contains(@class, 'markLineCon')][2]/table/node()").extract())
        # Un-comment the table body so lxml sees real rows.
        html = html.replace('<!--','').replace('-->','')
        root = lxml.html.fromstring(html)

        if ''.join(root.xpath(u"//tr[2]/td[1][not(contains(text(), '没有查到'))]/text()")).strip() != '':
            for tr_dom in root.xpath(u"//tr"):
                name    = ''.join(tr_dom.xpath(u"./td[1]/text()")).strip()
                average = ''.join(tr_dom.xpath(u"./td[3]/text()")).strip()
                upper   = ''.join(tr_dom.xpath(u"./td[4]/text()")).strip()
                batch   = ''.join(tr_dom.xpath(u"./td[5]/text()")).strip()

                # Header rows have no td[1] text; skip them.
                if name != '':
                    pd              = ProfessionData()
                    pd['table']     = 't_university_profession_data'
                    pd['fid']       = meta['fid']
                    pd['provinceid']= meta['provinceid']
                    pd['type']      = meta['type']
                    pd['year']      = meta['year']
                    pd['name']      = name
                    pd['average']   = average
                    pd['upper']     = upper
                    pd['batch']     = batch

                    yield pd
        else:
            # BUG FIX: this `else` was indented to match the `for` loop
            # (a for/else), so "no data" also printed after every successful
            # parse. It belongs to the no-data check above.
            print("no data", response.url)

    def get_header(self, referer):
        """Build request headers with a random User-Agent from settings.

        :param referer: value for the ``Referer`` header.
        :returns: dict of HTTP headers.
        """
        agents = self.settings.getlist('USER_AGENTS')
        random_agent = random.choice(agents)

        headers = {
            'Referer': referer,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',
            # NOTE(review): 'land.fang.com' does not match this spider's
            # target domain (gaokaopai.com) — looks copy-pasted from another
            # spider. Left unchanged; confirm before altering requests.
            'Host': 'land.fang.com',
            'Upgrade-Insecure-Requests': 1,
            'User-Agent': random_agent,
        }

        return headers



